
Rewrite p2p-acceptblock in preparation for slight behavior changes

Removes checks of whitelisted-peer behavior (which will be removed, as
the difference in behavior here makes little sense) and no longer
requires that blocks with the same work as our tip be dropped if they
were not requested (in part because we *do* request those blocks).

Github-Pull: #11531
Rebased-From: 3b4ac43bc3
0.15
Authored by Matt Corallo, committed by MarcoFalke
commit e976c36ddf
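The behavior being prepared for here concerns how a node handles a block it never asked for: in the 0.15-era code an unrequested block whose chain work does not exceed the current tip is kept as a header only and its data is dropped. The fragment below is a minimal sketch of how the rewritten test probes that at the P2P layer. It is illustrative only, not part of the commit; the class name is invented, and it assumes the same 0.15-era test framework helpers the test itself imports (NodeConn, NodeConnCB, NetworkThread, msg_block, create_block, create_coinbase, p2p_port, assert_equal).

#!/usr/bin/env python3
# Minimal sketch (not from the commit): deliver one unrequested fork block
# and confirm the node keeps only its header, using the 0.15-era mininode API.
import time

from test_framework.blocktools import create_block, create_coinbase
from test_framework.mininode import NetworkThread, NodeConn, NodeConnCB, msg_block
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, p2p_port

class UnrequestedBlockSketch(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def run_test(self):
        peer = NodeConnCB()
        conn = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], peer)
        peer.add_connection(conn)
        NetworkThread().start()
        peer.wait_for_verack()

        self.nodes[0].generate(1)  # leave IBD; the tip is now at height 1

        # Build a block that forks from genesis and deliver it unrequested.
        genesis = int("0x" + self.nodes[0].getblockhash(0), 0)
        fork_block = create_block(genesis, create_coinbase(1), int(time.time()) + 1)
        fork_block.solve()
        peer.send_message(msg_block(fork_block))
        peer.sync_with_ping()

        # The node keeps only the header: the fork shows up as "headers-only"
        # in getchaintips because the unrequested block data was not stored.
        statuses = {t['hash']: t['status'] for t in self.nodes[0].getchaintips()}
        assert_equal(statuses.get(fork_block.hash), "headers-only")

if __name__ == '__main__':
    UnrequestedBlockSketch().main()

This mirrors step 3 of the rewritten test below, which additionally checks via getblock that the fork block is not found on disk.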
1 changed file: test/functional/p2p-acceptblock.py (236 changed lines)

@@ -4,42 +4,32 @@
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Test processing of unrequested blocks.
 
-Since behavior differs when receiving unrequested blocks from whitelisted peers
-versus non-whitelisted peers, this tests the behavior of both (effectively two
-separate tests running in parallel).
-
-Setup: three nodes, node0+node1+node2, not connected to each other. Node0 does not
-whitelist localhost, but node1 does. They will each be on their own chain for
-this test. Node2 will have nMinimumChainWork set to 0x10, so it won't process
-low-work unrequested blocks.
-
-We have one NodeConn connection to each, test_node, white_node, and min_work_node,
-respectively.
+Setup: two nodes, node0+node1, not connected to each other. Node1 will have
+nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
+
+We have one NodeConn connection to node0 called test_node, and one to node1
+called min_work_node.
 
 The test:
 1. Generate one block on each node, to leave IBD.
 
 2. Mine a new block on each tip, and deliver to each node from node's peer.
-   The tip should advance for node0 and node1, but node2 should skip processing
-   due to nMinimumChainWork.
+   The tip should advance for node0, but node1 should skip processing due to
+   nMinimumChainWork.
 
-Node2 is unused in tests 3-7:
+Node1 is unused in tests 3-7:
 
-3. Mine a block that forks the previous block, and deliver to each node from
-   corresponding peer.
-   Node0 should not process this block (just accept the header), because it is
-   unrequested and doesn't have more work than the tip.
-   Node1 should process because this is coming from a whitelisted peer.
+3. Mine a block that forks from the genesis block, and deliver to test_node.
+   Node0 should not process this block (just accept the header), because it
+   is unrequested and doesn't have more or equal work to the tip.
 
-4. Send another block that builds on the forking block.
-   Node0 should process this block but be stuck on the shorter chain, because
-   it's missing an intermediate block.
-   Node1 should reorg to this longer chain.
+4a,b. Send another two blocks that build on the forking block.
+   Node0 should process the second block but be stuck on the shorter chain,
+   because it's missing an intermediate block.
 
-4b.Send 288 more blocks on the longer chain.
+4c.Send 288 more blocks on the longer chain (the number of blocks ahead
+   we currently store).
    Node0 should process all but the last block (too far ahead in height).
-   Send all headers to Node1, and then send the last block in that chain.
-   Node1 should accept the block because it's coming from a whitelisted peer.
 
 5. Send a duplicate of the block in #3 to Node0.
    Node0 should not process the block because it is unrequested, and stay on
@@ -52,7 +42,7 @@ Node2 is unused in tests 3-7:
 
 7. Send Node0 the missing block again.
    Node0 should process and the tip should advance.
 
-8. Test Node2 is able to sync when connected to node0 (which should have sufficient
+8. Test Node1 is able to sync when connected to node0 (which should have sufficient
 work on its chain).
 """
@@ -71,8 +61,8 @@ class AcceptBlockTest(BitcoinTestFramework):
     def set_test_params(self):
         self.setup_clean_chain = True
-        self.num_nodes = 3
-        self.extra_args = [[], ["-whitelist=127.0.0.1"], ["-minimumchainwork=0x10"]]
+        self.num_nodes = 2
+        self.extra_args = [[], ["-minimumchainwork=0x10"]]
 
     def setup_network(self):
         # Node0 will be used to test behavior of processing unrequested blocks
@@ -84,132 +74,149 @@ class AcceptBlockTest(BitcoinTestFramework):
     def run_test(self):
         # Setup the p2p connections and start up the network thread.
-        test_node = NodeConnCB()   # connects to node0 (not whitelisted)
-        white_node = NodeConnCB()  # connects to node1 (whitelisted)
-        min_work_node = NodeConnCB()  # connects to node2 (not whitelisted)
+        test_node = NodeConnCB()   # connects to node0
+        min_work_node = NodeConnCB()  # connects to node1
 
         connections = []
         connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
-        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
-        connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], min_work_node))
+        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], min_work_node))
         test_node.add_connection(connections[0])
-        white_node.add_connection(connections[1])
-        min_work_node.add_connection(connections[2])
+        min_work_node.add_connection(connections[1])
 
         NetworkThread().start() # Start up network handling in another thread
 
         # Test logic begins here
         test_node.wait_for_verack()
-        white_node.wait_for_verack()
         min_work_node.wait_for_verack()
 
-        # 1. Have nodes mine a block (nodes1/2 leave IBD)
+        # 1. Have nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
         tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
 
         # 2. Send one block that builds on each tip.
-        # This should be accepted by nodes 1/2
+        # This should be accepted by node0
         blocks_h2 = []  # the height 2 blocks on each node's chain
         block_time = int(time.time()) + 1
-        for i in range(3):
+        for i in range(2):
             blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
             blocks_h2[i].solve()
             block_time += 1
         test_node.send_message(msg_block(blocks_h2[0]))
-        white_node.send_message(msg_block(blocks_h2[1]))
-        min_work_node.send_message(msg_block(blocks_h2[2]))
+        min_work_node.send_message(msg_block(blocks_h2[1]))
 
-        for x in [test_node, white_node, min_work_node]:
+        for x in [test_node, min_work_node]:
             x.sync_with_ping()
         assert_equal(self.nodes[0].getblockcount(), 2)
-        assert_equal(self.nodes[1].getblockcount(), 2)
-        assert_equal(self.nodes[2].getblockcount(), 1)
-        self.log.info("First height 2 block accepted by node0/node1; correctly rejected by node2")
+        assert_equal(self.nodes[1].getblockcount(), 1)
+        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
 
-        # 3. Send another block that builds on the original tip.
-        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
-        for i in range(2):
-            blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
-            blocks_h2f[i].solve()
-        test_node.send_message(msg_block(blocks_h2f[0]))
-        white_node.send_message(msg_block(blocks_h2f[1]))
+        # 3. Send another block that builds on genesis.
+        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
+        block_time += 1
+        block_h1f.solve()
+        test_node.send_message(msg_block(block_h1f))
 
-        for x in [test_node, white_node]:
-            x.sync_with_ping()
+        test_node.sync_with_ping()
+        tip_entry_found = False
         for x in self.nodes[0].getchaintips():
-            if x['hash'] == blocks_h2f[0].hash:
+            if x['hash'] == block_h1f.hash:
                 assert_equal(x['status'], "headers-only")
+                tip_entry_found = True
+        assert(tip_entry_found)
+        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
 
-        for x in self.nodes[1].getchaintips():
-            if x['hash'] == blocks_h2f[1].hash:
-                assert_equal(x['status'], "valid-headers")
+        # 4. Send another two block that build on the fork.
+        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
+        block_time += 1
+        block_h2f.solve()
+        test_node.send_message(msg_block(block_h2f))
 
-        self.log.info("Second height 2 block accepted only from whitelisted peer")
+        test_node.sync_with_ping()
+        # Since the earlier block was not processed by node, the new block
+        # can't be fully validated.
+        tip_entry_found = False
+        for x in self.nodes[0].getchaintips():
+            if x['hash'] == block_h2f.hash:
+                assert_equal(x['status'], "headers-only")
+                tip_entry_found = True
+        assert(tip_entry_found)
+
+        # But this block should be accepted by node since it has equal work.
+        # TODO: We currently drop this block but likely shouldn't
+        #self.nodes[0].getblock(block_h2f.hash)
+        self.log.info("Second height 2 block accepted, but not reorg'ed to")
 
-        # 4. Now send another block that builds on the forking chain.
-        blocks_h3 = []
-        for i in range(2):
-            blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
-            blocks_h3[i].solve()
-        test_node.send_message(msg_block(blocks_h3[0]))
-        white_node.send_message(msg_block(blocks_h3[1]))
+        # 4b. Now send another block that builds on the forking chain.
+        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
+        block_h3.solve()
+        test_node.send_message(msg_block(block_h3))
 
-        for x in [test_node, white_node]:
-            x.sync_with_ping()
-        # Since the earlier block was not processed by node0, the new block
+        test_node.sync_with_ping()
+        # Since the earlier block was not processed by node, the new block
         # can't be fully validated.
+        tip_entry_found = False
         for x in self.nodes[0].getchaintips():
-            if x['hash'] == blocks_h3[0].hash:
+            if x['hash'] == block_h3.hash:
                 assert_equal(x['status'], "headers-only")
+                tip_entry_found = True
+        assert(tip_entry_found)
+        self.nodes[0].getblock(block_h3.hash)
 
-        # But this block should be accepted by node0 since it has more work.
-        self.nodes[0].getblock(blocks_h3[0].hash)
-        self.log.info("Unrequested more-work block accepted from non-whitelisted peer")
+        # But this block should be accepted by node since it has more work.
+        self.nodes[0].getblock(block_h3.hash)
+        self.log.info("Unrequested more-work block accepted")
 
-        # Node1 should have accepted and reorged.
-        assert_equal(self.nodes[1].getblockcount(), 3)
-        self.log.info("Successfully reorged to length 3 chain from whitelisted peer")
-
-        # 4b. Now mine 288 more blocks and deliver; all should be processed but
-        # the last (height-too-high) on node0. Node1 should process the tip if
-        # we give it the headers chain leading to the tip.
-        tips = blocks_h3
-        headers_message = msg_headers()
-        all_blocks = []  # node0's blocks
-        for j in range(2):
-            for i in range(288):
-                next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
-                next_block.solve()
-                if j==0:
-                    test_node.send_message(msg_block(next_block))
-                    all_blocks.append(next_block)
-                else:
-                    headers_message.headers.append(CBlockHeader(next_block))
-                tips[j] = next_block
-
-        time.sleep(2)
+        # 4c. Now mine 288 more blocks and deliver; all should be processed but
+        # the last (height-too-high) on node (as long as its not missing any headers)
+        tip = block_h3
+        all_blocks = []
+        for i in range(288):
+            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
+            next_block.solve()
+            all_blocks.append(next_block)
+            tip = next_block
+
+        # Now send the block at height 5 and check that it wasn't accepted (missing header)
+        test_node.send_message(msg_block(all_blocks[1]))
+        test_node.sync_with_ping()
+        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
+        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
+
+        # The block at height 5 should be accepted if we provide the missing header, though
+        headers_message = msg_headers()
+        headers_message.headers.append(CBlockHeader(all_blocks[0]))
+        test_node.send_message(headers_message)
+        test_node.send_message(msg_block(all_blocks[1]))
+        test_node.sync_with_ping()
+        self.nodes[0].getblock(all_blocks[1].hash)
+
+        # Now send the blocks in all_blocks
+        for i in range(288):
+            test_node.send_message(msg_block(all_blocks[i]))
+        test_node.sync_with_ping()
 
         # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
         for x in all_blocks[:-1]:
             self.nodes[0].getblock(x.hash)
         assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
 
-        headers_message.headers.pop() # Ensure the last block is unrequested
-        white_node.send_message(headers_message) # Send headers leading to tip
-        white_node.send_message(msg_block(tips[1])) # Now deliver the tip
-        white_node.sync_with_ping()
-        self.nodes[1].getblock(tips[1].hash)
-        self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")
-
         # 5. Test handling of unrequested block on the node that didn't process
         # Should still not be processed (even though it has a child that has more
         # work).
-        test_node.send_message(msg_block(blocks_h2f[0]))
 
-        # Here, if the sleep is too short, the test could falsely succeed (if the
-        # node hasn't processed the block by the time the sleep returns, and then
-        # the node processes it and incorrectly advances the tip).
-        # But this would be caught later on, when we verify that an inv triggers
-        # a getdata request for this block.
+        # The node should have requested the blocks at some point, so
+        # disconnect/reconnect first
+        connections[0].disconnect_node()
+        test_node.wait_for_disconnect()
+
+        test_node = NodeConnCB()   # connects to node (not whitelisted)
+        connections[0] = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
+        test_node.add_connection(connections[0])
+        test_node.wait_for_verack()
+
+        test_node.send_message(msg_block(block_h1f))
+        test_node.send_message(msg_block(block_h2f)) # This should not be required
+
         test_node.sync_with_ping()
         assert_equal(self.nodes[0].getblockcount(), 2)
         self.log.info("Unrequested block that would complete more-work chain was ignored")
@@ -220,27 +227,28 @@ class AcceptBlockTest(BitcoinTestFramework):
 
         with mininode_lock:
             # Clear state so we can check the getdata request
             test_node.last_message.pop("getdata", None)
-            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
+            test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
 
         test_node.sync_with_ping()
         with mininode_lock:
             getdata = test_node.last_message["getdata"]
 
         # Check that the getdata includes the right block
-        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
+        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
         self.log.info("Inv at tip triggered getdata for unprocessed block")
 
         # 7. Send the missing block for the third time (now it is requested)
-        test_node.send_message(msg_block(blocks_h2f[0]))
+        test_node.send_message(msg_block(block_h1f))
+        test_node.send_message(msg_block(block_h2f)) # This should not be required
 
         test_node.sync_with_ping()
         assert_equal(self.nodes[0].getblockcount(), 290)
         self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
 
-        # 8. Connect node2 to node0 and ensure it is able to sync
-        connect_nodes(self.nodes[0], 2)
-        sync_blocks([self.nodes[0], self.nodes[2]])
-        self.log.info("Successfully synced nodes 2 and 0")
+        # 8. Connect node1 to node0 and ensure it is able to sync
+        connect_nodes(self.nodes[0], 1)
+        sync_blocks([self.nodes[0], self.nodes[1]])
+        self.log.info("Successfully synced nodes 1 and 0")
 
         [ c.disconnect_node() for c in connections ]
