Luke Dashjr
9 years ago
532 changed files with 26116 additions and 7533 deletions
@ -0,0 +1,45 @@
@@ -0,0 +1,45 @@
|
||||
#!/usr/bin/env python |
||||
# Copyright (c) 2015 The Bitcoin Core developers |
||||
# Distributed under the MIT software license, see the accompanying |
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. |
||||
|
||||
''' |
||||
This checks if all command line args are documented. |
||||
Return value is 0 to indicate no error. |
||||
|
||||
Author: @MarcoFalke |
||||
''' |
||||
|
||||
from subprocess import check_output |
||||
import re |
||||
|
||||
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP
# Find all uses of command-line args in the sources (excluding the tests).
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST)
# Find all args that are documented via HelpMessageOpt().
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay'])

def main():
    """Check that every command-line arg used in the sources is documented.

    Greps the source tree for arg usage and for HelpMessageOpt() entries,
    prints a summary, and exits with the number of undocumented args
    (0 means success).
    """
    # check_output returns bytes; decode so the str regexes below apply.
    used = check_output(CMD_GREP_ARGS, shell=True).decode('utf-8')
    docd = check_output(CMD_GREP_DOCS, shell=True).decode('utf-8')

    args_used = set(re.findall(REGEX_ARG, used))
    args_docd = set(re.findall(REGEX_DOC, docd)).union(SET_DOC_OPTIONAL)
    args_need_doc = args_used.difference(args_docd)
    # Documented but never used: likely stale help text.
    args_unknown = args_docd.difference(args_used)

    print("Args used        : %s" % len(args_used))
    print("Args documented  : %s" % len(args_docd))
    print("Args undocumented: %s" % len(args_need_doc))
    print(args_need_doc)
    print("Args unknown     : %s" % len(args_unknown))
    print(args_unknown)

    exit(len(args_need_doc))

if __name__ == "__main__":
    main()
@ -0,0 +1,164 @@
@@ -0,0 +1,164 @@
|
||||
#!/usr/bin/env python |
||||
# |
||||
#===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===# |
||||
# |
||||
# The LLVM Compiler Infrastructure |
||||
# |
||||
# This file is distributed under the University of Illinois Open Source |
||||
# License. |
||||
# |
||||
# ============================================================ |
||||
# |
||||
# University of Illinois/NCSA |
||||
# Open Source License |
||||
# |
||||
# Copyright (c) 2007-2015 University of Illinois at Urbana-Champaign. |
||||
# All rights reserved. |
||||
# |
||||
# Developed by: |
||||
# |
||||
# LLVM Team |
||||
# |
||||
# University of Illinois at Urbana-Champaign |
||||
# |
||||
# http://llvm.org |
||||
# |
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy of |
||||
# this software and associated documentation files (the "Software"), to deal with |
||||
# the Software without restriction, including without limitation the rights to |
||||
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies |
||||
# of the Software, and to permit persons to whom the Software is furnished to do |
||||
# so, subject to the following conditions: |
||||
# |
||||
# * Redistributions of source code must retain the above copyright notice, |
||||
# this list of conditions and the following disclaimers. |
||||
# |
||||
# * Redistributions in binary form must reproduce the above copyright notice, |
||||
# this list of conditions and the following disclaimers in the |
||||
# documentation and/or other materials provided with the distribution. |
||||
# |
||||
# * Neither the names of the LLVM Team, University of Illinois at |
||||
# Urbana-Champaign, nor the names of its contributors may be used to |
||||
# endorse or promote products derived from this Software without specific |
||||
# prior written permission. |
||||
# |
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS |
||||
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE |
||||
# SOFTWARE. |
||||
# |
||||
# ============================================================ |
||||
# |
||||
#===------------------------------------------------------------------------===# |
||||
|
||||
r""" |
||||
ClangFormat Diff Reformatter |
||||
============================ |
||||
|
||||
This script reads input from a unified diff and reformats all the changed |
||||
lines. This is useful to reformat all the lines touched by a specific patch. |
||||
Example usage for git/svn users: |
||||
|
||||
git diff -U0 HEAD^ | clang-format-diff.py -p1 -i |
||||
svn diff --diff-cmd=diff -x-U0 | clang-format-diff.py -i |
||||
|
||||
""" |
||||
|
||||
import argparse |
||||
import difflib |
||||
import re |
||||
import string |
||||
import subprocess |
||||
import StringIO |
||||
import sys |
||||
|
||||
|
||||
# Change this to the full path if clang-format is not on the path. |
||||
binary = 'clang-format' |
||||
|
||||
|
||||
def main():
    """Read a unified diff from stdin and reformat the touched line ranges.

    For each file named in the diff (filtered by -regex/-iregex), the changed
    line ranges are collected from the hunk headers and clang-format is run on
    exactly those ranges.  With -i the files are rewritten in place; otherwise
    the formatting changes are printed as a unified diff on stdout.
    """
    parser = argparse.ArgumentParser(description=
                                     'Reformat changed lines in diff. Without -i '
                                     'option just output the diff that would be '
                                     'introduced.')
    parser.add_argument('-i', action='store_true', default=False,
                        help='apply edits to files instead of displaying a diff')
    parser.add_argument('-p', metavar='NUM', default=0,
                        help='strip the smallest prefix containing P slashes')
    parser.add_argument('-regex', metavar='PATTERN', default=None,
                        help='custom pattern selecting file paths to reformat '
                        '(case sensitive, overrides -iregex)')
    parser.add_argument('-iregex', metavar='PATTERN', default=
                        r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc|js|ts|proto'
                        r'|protodevel|java)',
                        help='custom pattern selecting file paths to reformat '
                        '(case insensitive, overridden by -regex)')
    parser.add_argument('-sort-includes', action='store_true', default=False,
                        help='let clang-format sort include blocks')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='be more verbose, ineffective without -i')
    args = parser.parse_args()

    # Extract changed lines for each file.
    filename = None
    lines_by_file = {}
    for line in sys.stdin:
        # '+++ b/path' header: remember which file the following hunks touch.
        match = re.search(r'^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line)
        if match:
            filename = match.group(2)
        if filename is None:
            continue

        if args.regex is not None:
            if not re.match('^%s$' % args.regex, filename):
                continue
        else:
            if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
                continue

        # '@@ -a,b +start,count @@' hunk header: record the post-image range.
        match = re.search(r'^@@.*\+(\d+)(,(\d+))?', line)
        if match:
            start_line = int(match.group(1))
            line_count = 1
            if match.group(3):
                line_count = int(match.group(3))
            if line_count == 0:
                # Pure deletion hunk: nothing to reformat in the new file.
                continue
            end_line = start_line + line_count - 1
            lines_by_file.setdefault(filename, []).extend(
                ['-lines', str(start_line) + ':' + str(end_line)])

    # Reformat files containing changes in place.
    for filename, lines in lines_by_file.items():
        if args.i and args.verbose:
            print('Formatting', filename)
        command = [binary, filename]
        if args.i:
            command.append('-i')
        if args.sort_includes:
            command.append('-sort-includes')
        command.extend(lines)
        command.extend(['-style=file', '-fallback-style=none'])
        # universal_newlines=True makes stdout a str on both Python 2 and 3.
        p = subprocess.Popen(command, stdout=subprocess.PIPE,
                             stderr=None, stdin=subprocess.PIPE,
                             universal_newlines=True)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            sys.exit(p.returncode)

        if not args.i:
            with open(filename) as f:
                code = f.readlines()
            # splitlines(True) keeps line endings, matching readlines().
            formatted_code = stdout.splitlines(True)
            diff = difflib.unified_diff(code, formatted_code,
                                        filename, filename,
                                        '(before formatting)', '(after formatting)')
            diff_string = ''.join(diff)
            if len(diff_string) > 0:
                sys.stdout.write(diff_string)

if __name__ == '__main__':
    main()
@ -1,53 +1,46 @@
@@ -1,53 +1,46 @@
|
||||
#!/usr/bin/env python |
||||
''' |
||||
Run this script inside of src/ and it will look for all the files |
||||
that were changed this year that still have the last year in the |
||||
copyright headers, and it will fix the headers on that file using |
||||
a perl regex one liner. |
||||
Run this script to update all the copyright headers of files |
||||
that were changed this year. |
||||
|
||||
For example: if it finds something like this and we're in 2014 |
||||
For example: |
||||
|
||||
// Copyright (c) 2009-2013 The Bitcoin Core developers |
||||
// Copyright (c) 2009-2012 The Bitcoin Core developers |
||||
|
||||
it will change it to |
||||
|
||||
// Copyright (c) 2009-2014 The Bitcoin Core developers |
||||
|
||||
It will do this for all the files in the folder and its children. |
||||
|
||||
Author: @gubatron |
||||
// Copyright (c) 2009-2015 The Bitcoin Core developers |
||||
''' |
||||
import os |
||||
import time |
||||
import re |
||||
|
||||
# Year to stamp into the headers (current UTC year).
year = time.gmtime()[0]

# Shell pipeline: print the year of the last commit touching a file.
CMD_GIT_DATE = 'git log --format=@%%at -1 %s | date +"%%Y" -u -f -'
# In-place perl rewrite: '2009-2013 The Bitcoin' -> '2009-<year> The Bitcoin'.
CMD_REGEX = r"perl -pi -e 's/(20\d\d)(?:-20\d\d)? The Bitcoin/$1-%s The Bitcoin/' %s"
# Matches headers that already mention the current year (nothing to do).
REGEX_CURRENT = re.compile("%s The Bitcoin" % year)
CMD_LIST_FILES = "find %s | grep %s"

FOLDERS = ["./qa", "./src"]
EXTENSIONS = [".cpp", ".h", ".py"]

def get_git_date(file_path):
    """Return the year (as a string) of the last git commit touching
    file_path, or "" if the file has no git history."""
    r = os.popen(CMD_GIT_DATE % file_path)
    for l in r:
        # Result is one line, so just return
        return l.replace("\n", "")
    return ""

n = 1
for folder in FOLDERS:
    for extension in EXTENSIONS:
        for file_path in os.popen(CMD_LIST_FILES % (folder, extension)):
            # Strip the leading '.' and the trailing newline from find output.
            file_path = os.getcwd() + file_path[1:-1]
            if file_path.endswith(extension):
                git_date = get_git_date(file_path)
                if str(year) == git_date:
                    # Only update if current year is not found
                    if REGEX_CURRENT.search(open(file_path, "r").read()) is None:
                        print(n, "Last git edit", git_date, "-", file_path)
                        os.popen(CMD_REGEX % (year, file_path))
                        n = n + 1
@ -0,0 +1,235 @@
@@ -0,0 +1,235 @@
|
||||
#!/usr/bin/env python2 |
||||
# Copyright (c) 2016 Bitcoin Core Developers |
||||
# Distributed under the MIT software license, see the accompanying |
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. |
||||
|
||||
# This script will locally construct a merge commit for a pull request on a |
||||
# github repository, inspect it, sign it and optionally push it. |
||||
|
||||
# The following temporary branches are created/overwritten and deleted: |
||||
# * pull/$PULL/base (the current master we're merging onto) |
||||
# * pull/$PULL/head (the current state of the remote pull request) |
||||
# * pull/$PULL/merge (github's merge) |
||||
# * pull/$PULL/local-merge (our merge) |
||||
|
||||
# In case of a clean merge that is accepted by the user, the local branch with |
||||
# name $BRANCH is overwritten with the merged result, and optionally pushed. |
||||
from __future__ import division,print_function,unicode_literals |
||||
import os,sys |
||||
from sys import stdin,stdout,stderr |
||||
import argparse |
||||
import subprocess |
||||
|
||||
# External tools (can be overridden using environment)
GIT = os.getenv('GIT','git')
BASH = os.getenv('BASH','bash')

# OS specific configuration for terminal attributes
# Defaults (no color, plain one-line log format) for non-POSIX platforms.
ATTR_RESET = ''
ATTR_PR = ''
COMMIT_FORMAT = '%h %s (%an)%d'
if os.name == 'posix': # if posix, assume we can use basic terminal escapes
    ATTR_RESET = '\033[0m'
    ATTR_PR = '\033[1;36m'
    # Colorized git --pretty format used when showing the commits being merged.
    COMMIT_FORMAT = '%C(bold blue)%h%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset'
||||
|
||||
def git_config_get(option, default=None):
    '''
    Get named configuration option from git repository.
    '''
    try:
        value = subprocess.check_output([GIT,'config','--get',option])
    except subprocess.CalledProcessError:
        # Option is unset (or git failed): fall back to the caller's default.
        return default
    return value.rstrip()
||||
|
||||
def retrieve_pr_title(repo,pull):
    '''
    Retrieve pull request title from github.
    Return None if no title can be found, or an error happens.
    '''
    import urllib2,json
    try:
        # Any failure (bad repo/pull, network error, JSON shape) is non-fatal.
        request = urllib2.Request("https://api.github.com/repos/"+repo+"/pulls/"+pull)
        payload = json.load(urllib2.urlopen(request))
        return payload['title']
    except Exception as e:
        print('Warning: unable to retrieve pull title from github: %s' % e)
        return None
||||
|
||||
def ask_prompt(text):
    '''Show *text* on stderr and return one stripped line read from stdin.'''
    # Prompt on stderr so it is not captured when stdout is redirected.
    print(text,end=" ",file=stderr)
    answer = stdin.readline()
    print("",file=stderr)
    return answer.rstrip()
||||
|
||||
def parse_arguments(branch):
    '''Parse the command line; *branch* is the default merge target.'''
    epilog = '''
In addition, you can set the following git configuration variables:
githubmerge.repository (mandatory),
user.signingkey (mandatory),
githubmerge.host (default: git@github.com),
githubmerge.branch (default: master),
githubmerge.testcmd (default: none).
'''
    parser = argparse.ArgumentParser(
        description='Utility to merge, sign and push github pull requests',
        epilog=epilog)
    parser.add_argument('pull', metavar='PULL', type=int, nargs=1,
                        help='Pull request ID to merge')
    parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?',
                        default=branch,
                        help='Branch to merge against (default: '+branch+')')
    return parser.parse_args()
||||
|
||||
def main():
    """Merge a github pull request locally, verify, sign and optionally push.

    Reads settings from git config (githubmerge.*, user.signingkey), fetches
    the pull request refs into temporary pull/$PULL/* branches, recreates the
    merge commit locally, lets the user inspect/test it, GPG-signs it, puts
    the result on the target branch and finally offers to push.
    """
    # Extract settings from git repo
    repo = git_config_get('githubmerge.repository')
    host = git_config_get('githubmerge.host','git@github.com')
    branch = git_config_get('githubmerge.branch','master')
    testcmd = git_config_get('githubmerge.testcmd')
    signingkey = git_config_get('user.signingkey')
    if repo is None:
        print("ERROR: No repository configured. Use this command to set:", file=stderr)
        print("git config githubmerge.repository <owner>/<repo>", file=stderr)
        exit(1)
    if signingkey is None:
        print("ERROR: No GPG signing key set. Set one using:",file=stderr)
        print("git config --global user.signingkey <key>",file=stderr)
        exit(1)

    host_repo = host+":"+repo # shortcut for push/pull target

    # Extract settings from command line
    args = parse_arguments(branch)
    pull = str(args.pull[0])
    branch = args.branch

    # Initialize source branches
    head_branch = 'pull/'+pull+'/head'
    base_branch = 'pull/'+pull+'/base'
    merge_branch = 'pull/'+pull+'/merge'
    local_merge_branch = 'pull/'+pull+'/local-merge'

    devnull = open(os.devnull,'w')
    try:
        subprocess.check_call([GIT,'checkout','-q',branch])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot check out branch %s." % (branch), file=stderr)
        exit(3)
    try:
        subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*'])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find pull request #%s on %s." % (pull,host_repo), file=stderr)
        exit(3)
    try:
        # 'git log -1' is only used here to probe that the ref exists.
        subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout)
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr)
        exit(3)
    try:
        subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout)
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr)
        exit(3)
    try:
        subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/heads/'+branch+':refs/heads/'+base_branch])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find branch %s on %s." % (branch,host_repo), file=stderr)
        exit(3)
    subprocess.check_call([GIT,'checkout','-q',base_branch])
    # Delete any stale local-merge branch from a previous run; failure is fine.
    subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull)
    subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch])

    try:
        # Create unsigned merge commit.
        title = retrieve_pr_title(repo,pull)
        if title:
            firstline = 'Merge #%s: %s' % (pull,title)
        else:
            firstline = 'Merge #%s' % (pull,)
        # Commit message: subject line plus the shortlog of merged commits.
        message = firstline + '\n\n'
        message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%h %s (%an)',base_branch+'..'+head_branch]).decode('utf-8')
        try:
            subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','-m',message.encode('utf-8'),head_branch])
        except subprocess.CalledProcessError as e:
            print("ERROR: Cannot be merged cleanly.",file=stderr)
            subprocess.check_call([GIT,'merge','--abort'])
            exit(4)
        logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8')
        # If HEAD's subject is not our merge message, no merge commit was made.
        if logmsg.rstrip() != firstline.rstrip():
            print("ERROR: Creating merge failed (already merged?).",file=stderr)
            exit(4)

        print('%s#%s%s %s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title))
        subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch])
        print()
        # Run test command if configured.
        if testcmd:
            # Go up to the repository's root.
            toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip()
            os.chdir(toplevel)
            if subprocess.call(testcmd,shell=True):
                print("ERROR: Running %s failed." % testcmd,file=stderr)
                exit(5)

            # Show the created merge.
            diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch])
            subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch])
            if diff:
                # Our local merge differs from what github computed.
                print("WARNING: merge differs from github!",file=stderr)
                reply = ask_prompt("Type 'ignore' to continue.")
                if reply.lower() == 'ignore':
                    print("Difference with github ignored.",file=stderr)
                else:
                    exit(6)
            reply = ask_prompt("Press 'd' to accept the diff.")
            if reply.lower() == 'd':
                print("Diff accepted.",file=stderr)
            else:
                print("ERROR: Diff rejected.",file=stderr)
                exit(6)
        else:
            # Verify the result manually.
            print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)
            print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr)
            print("Type 'exit' when done.",file=stderr)
            if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
                os.putenv('debian_chroot',pull)
            subprocess.call([BASH,'-i'])
            reply = ask_prompt("Type 'm' to accept the merge.")
            if reply.lower() == 'm':
                print("Merge accepted.",file=stderr)
            else:
                print("ERROR: Merge rejected.",file=stderr)
                exit(7)

        # Sign the merge commit.
        reply = ask_prompt("Type 's' to sign off on the merge.")
        if reply == 's':
            try:
                subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
            except subprocess.CalledProcessError as e:
                print("Error signing, exiting.",file=stderr)
                exit(1)
        else:
            print("Not signing off on merge, exiting.",file=stderr)
            exit(1)

        # Put the result in branch.
        subprocess.check_call([GIT,'checkout','-q',branch])
        subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch])
    finally:
        # Clean up temporary branches.
        subprocess.call([GIT,'checkout','-q',branch])
        subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)

    # Push the result.
    reply = ask_prompt("Type 'push' to push the result to %s, branch %s." % (host_repo,branch))
    if reply.lower() == 'push':
        subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])

if __name__ == '__main__':
    main()
||||
|
@ -1,185 +0,0 @@
@@ -1,185 +0,0 @@
|
||||
#!/bin/bash |
||||
|
||||
# This script will locally construct a merge commit for a pull request on a |
||||
# github repository, inspect it, sign it and optionally push it. |
||||
|
||||
# The following temporary branches are created/overwritten and deleted: |
||||
# * pull/$PULL/base (the current master we're merging onto) |
||||
# * pull/$PULL/head (the current state of the remote pull request) |
||||
# * pull/$PULL/merge (github's merge) |
||||
# * pull/$PULL/local-merge (our merge) |
||||
|
||||
# In case of a clean merge that is accepted by the user, the local branch with |
||||
# name $BRANCH is overwritten with the merged result, and optionally pushed. |
||||
|
||||
REPO="$(git config --get githubmerge.repository)" |
||||
if [[ "d$REPO" == "d" ]]; then |
||||
echo "ERROR: No repository configured. Use this command to set:" >&2 |
||||
echo "git config githubmerge.repository <owner>/<repo>" >&2 |
||||
echo "In addition, you can set the following variables:" >&2 |
||||
echo "- githubmerge.host (default git@github.com)" >&2 |
||||
echo "- githubmerge.branch (default master)" >&2 |
||||
echo "- githubmerge.testcmd (default none)" >&2 |
||||
exit 1 |
||||
fi |
||||
|
||||
HOST="$(git config --get githubmerge.host)" |
||||
if [[ "d$HOST" == "d" ]]; then |
||||
HOST="git@github.com" |
||||
fi |
||||
|
||||
BRANCH="$(git config --get githubmerge.branch)" |
||||
if [[ "d$BRANCH" == "d" ]]; then |
||||
BRANCH="master" |
||||
fi |
||||
|
||||
TESTCMD="$(git config --get githubmerge.testcmd)" |
||||
|
||||
PULL="$1" |
||||
|
||||
if [[ "d$PULL" == "d" ]]; then |
||||
echo "Usage: $0 pullnumber [branch]" >&2 |
||||
exit 2 |
||||
fi |
||||
|
||||
if [[ "d$2" != "d" ]]; then |
||||
BRANCH="$2" |
||||
fi |
||||
|
||||
# Initialize source branches. |
||||
git checkout -q "$BRANCH" |
||||
if git fetch -q "$HOST":"$REPO" "+refs/pull/$PULL/*:refs/heads/pull/$PULL/*"; then |
||||
if ! git log -q -1 "refs/heads/pull/$PULL/head" >/dev/null 2>&1; then |
||||
echo "ERROR: Cannot find head of pull request #$PULL on $HOST:$REPO." >&2 |
||||
exit 3 |
||||
fi |
||||
if ! git log -q -1 "refs/heads/pull/$PULL/merge" >/dev/null 2>&1; then |
||||
echo "ERROR: Cannot find merge of pull request #$PULL on $HOST:$REPO." >&2 |
||||
exit 3 |
||||
fi |
||||
else |
||||
echo "ERROR: Cannot find pull request #$PULL on $HOST:$REPO." >&2 |
||||
exit 3 |
||||
fi |
||||
if git fetch -q "$HOST":"$REPO" +refs/heads/"$BRANCH":refs/heads/pull/"$PULL"/base; then |
||||
true |
||||
else |
||||
echo "ERROR: Cannot find branch $BRANCH on $HOST:$REPO." >&2 |
||||
exit 3 |
||||
fi |
||||
git checkout -q pull/"$PULL"/base |
||||
git branch -q -D pull/"$PULL"/local-merge 2>/dev/null |
||||
git checkout -q -b pull/"$PULL"/local-merge |
||||
TMPDIR="$(mktemp -d -t ghmXXXXX)" |
||||
|
||||
function cleanup() { |
||||
git checkout -q "$BRANCH" |
||||
git branch -q -D pull/"$PULL"/head 2>/dev/null |
||||
git branch -q -D pull/"$PULL"/base 2>/dev/null |
||||
git branch -q -D pull/"$PULL"/merge 2>/dev/null |
||||
git branch -q -D pull/"$PULL"/local-merge 2>/dev/null |
||||
rm -rf "$TMPDIR" |
||||
} |
||||
|
||||
# Create unsigned merge commit. |
||||
( |
||||
echo "Merge pull request #$PULL" |
||||
echo "" |
||||
git log --no-merges --topo-order --pretty='format:%h %s (%an)' pull/"$PULL"/base..pull/"$PULL"/head |
||||
)>"$TMPDIR/message" |
||||
if git merge -q --commit --no-edit --no-ff -m "$(<"$TMPDIR/message")" pull/"$PULL"/head; then |
||||
if [ "d$(git log --pretty='format:%s' -n 1)" != "dMerge pull request #$PULL" ]; then |
||||
echo "ERROR: Creating merge failed (already merged?)." >&2 |
||||
cleanup |
||||
exit 4 |
||||
fi |
||||
else |
||||
echo "ERROR: Cannot be merged cleanly." >&2 |
||||
git merge --abort |
||||
cleanup |
||||
exit 4 |
||||
fi |
||||
|
||||
# Run test command if configured. |
||||
if [[ "d$TESTCMD" != "d" ]]; then |
||||
# Go up to the repository's root. |
||||
while [ ! -d .git ]; do cd ..; done |
||||
if ! $TESTCMD; then |
||||
echo "ERROR: Running $TESTCMD failed." >&2 |
||||
cleanup |
||||
exit 5 |
||||
fi |
||||
# Show the created merge. |
||||
git diff pull/"$PULL"/merge..pull/"$PULL"/local-merge >"$TMPDIR"/diff |
||||
git diff pull/"$PULL"/base..pull/"$PULL"/local-merge |
||||
if [[ "$(<"$TMPDIR"/diff)" != "" ]]; then |
||||
echo "WARNING: merge differs from github!" >&2 |
||||
read -p "Type 'ignore' to continue. " -r >&2 |
||||
if [[ "d$REPLY" =~ ^d[iI][gG][nN][oO][rR][eE]$ ]]; then |
||||
echo "Difference with github ignored." >&2 |
||||
else |
||||
cleanup |
||||
exit 6 |
||||
fi |
||||
fi |
||||
read -p "Press 'd' to accept the diff. " -n 1 -r >&2 |
||||
echo |
||||
if [[ "d$REPLY" =~ ^d[dD]$ ]]; then |
||||
echo "Diff accepted." >&2 |
||||
else |
||||
echo "ERROR: Diff rejected." >&2 |
||||
cleanup |
||||
exit 6 |
||||
fi |
||||
else |
||||
# Verify the result. |
||||
echo "Dropping you on a shell so you can try building/testing the merged source." >&2 |
||||
echo "Run 'git diff HEAD~' to show the changes being merged." >&2 |
||||
echo "Type 'exit' when done." >&2 |
||||
if [[ -f /etc/debian_version ]]; then # Show pull number in prompt on Debian default prompt |
||||
export debian_chroot="$PULL" |
||||
fi |
||||
bash -i |
||||
read -p "Press 'm' to accept the merge. " -n 1 -r >&2 |
||||
echo |
||||
if [[ "d$REPLY" =~ ^d[Mm]$ ]]; then |
||||
echo "Merge accepted." >&2 |
||||
else |
||||
echo "ERROR: Merge rejected." >&2 |
||||
cleanup |
||||
exit 7 |
||||
fi |
||||
fi |
||||
|
||||
# Sign the merge commit. |
||||
read -p "Press 's' to sign off on the merge. " -n 1 -r >&2 |
||||
echo |
||||
if [[ "d$REPLY" =~ ^d[Ss]$ ]]; then |
||||
if [[ "$(git config --get user.signingkey)" == "" ]]; then |
||||
echo "ERROR: No GPG signing key set, not signing. Set one using:" >&2 |
||||
echo "git config --global user.signingkey <key>" >&2 |
||||
cleanup |
||||
exit 1 |
||||
else |
||||
if ! git commit -q --gpg-sign --amend --no-edit; then |
||||
echo "Error signing, exiting." |
||||
cleanup |
||||
exit 1 |
||||
fi |
||||
fi |
||||
else |
||||
echo "Not signing off on merge, exiting." |
||||
cleanup |
||||
exit 1 |
||||
fi |
||||
|
||||
# Clean up temporary branches, and put the result in $BRANCH. |
||||
git checkout -q "$BRANCH" |
||||
git reset -q --hard pull/"$PULL"/local-merge |
||||
cleanup |
||||
|
||||
# Push the result. |
||||
read -p "Type 'push' to push the result to $HOST:$REPO, branch $BRANCH. " -r >&2 |
||||
if [[ "d$REPLY" =~ ^d[Pp][Uu][Ss][Hh]$ ]]; then |
||||
git push "$HOST":"$REPO" refs/heads/"$BRANCH" |
||||
fi |
@ -0,0 +1,52 @@
@@ -0,0 +1,52 @@
|
||||
-----BEGIN PGP PUBLIC KEY BLOCK----- |
||||
Version: GnuPG v1 |
||||
|
||||
mQINBFT4snkBEACx90Wf5XLo1Xv09p81eaOXc+8bbkSYzqx3ThDNUPRzjYpex9A9 |
||||
8FxfBenAykD3EgYuBTco4cbn7Dw11ppyXUw0VjWaagnnAVGxt3SDeY3ADwPss6xg |
||||
78FZXxT06xSHZXq1X6pOqhwTAnx3VGx+tR/A2DCsX0vHE6IVThZqyUq2Ei2C0Chc |
||||
od8y6JZ1CGNzlRkEgL9A0Zp0If6Uq4tXFxnLL6PtiS1b9V5rNfCSC7l99kIkG5oy |
||||
+SPsGRwVqTE2kqtuzkt9qVn6v8KKoZr0BY4IO3KMfJJ4eidOkB+OZK9REEQguDvv |
||||
tJfkF2HcMYa1efvQObyvVIfS5gxs7+kcSJxgDVZI5YxRV1OOfI7+w3EW3G+bPBQF |
||||
gSBwEaLbD+udr9lDZ4NZc7vTeoZtYVNZ+EQtG+6I9GzxJwEgO5LIwZ3//vh/R4iy |
||||
z9W91r7TrlkHUuOGg1hXMCI9sRa65NJtP4BWD0xO07zDKj0JHzeyKwgxB/ixZF2V |
||||
kc8EzJSKzRfr+638BMXONcf6NW8n6qIlJT2U2qIwiixjM8AUujGKb8DEgU1vIAn9 |
||||
7esOhceOtU/6iLuJrlK+TzMe97NoZCtt6ktmiAp8fu6l9uk3mr8JYLzIMtK+Asf4 |
||||
np5YLizABwbt9gEretnGpHrdKMN88mPYwsLjjCh9wiM0bHZNL52JQRkt3QARAQAB |
||||
tDNBbmRyZXcgQ2hvdyAoT2ZmaWNpYWwgTmV3IEtleSkgPGFjaG93MTAxQGdtYWls |
||||
LmNvbT6JAjYEEwEKACAFAlT4snkCGwMFCwkIBwMFFQoJCAsEFgIBAAIeAQIXgAAK |
||||
CRAXVlcy4I5eQfyGD/9idtVjybuXl+LXS4ph4M738PrZfQeLDmnwhVjfZiEOLLs2 |
||||
sAwGtL/CC0t9f7K7y+n5HtQoMX52jfVehnTDzeKCjRMs+5ssou+L9zadIAz68beU |
||||
7BZ0J1rR3n1kzwsFE3vx3IRno0VCTOgfL48AuuzMPxvEaLMxWQX8mL0PCV5/8Yxx |
||||
ftqg4kQ1JKMt5UTxE9/w0cBMphLTwV1Rx6lZILPJgOxYSQ0oOzQYSmucwzH1uOqH |
||||
wpgZ7SZIHfRWyi4TjQpU/5T2kMOlN/XdyWsj5+Eq+Y6zI6hq2se1vU3TOc8xN2S3 |
||||
7YOza1onUj4if0rWtkJZ2yDnR4lIASUD+/VP2NoWtoy7rB0vIfzbojfwxAp8WuHT |
||||
sUTxXd52c3OB+673OlOA+GAg2FfFjR8REojsTbeip35/KmFMpafazVRn+E0c3MfP |
||||
/iS43UTlcxewRcDrx/gRplmgO0+CLgLstZOon7Dz0msypeSArhX2xEj4tJb/ccKd |
||||
CR/IQl8q/ULQsHX1LwRj0u9doAlkqgIQdKXou4+EmD1jKF92oJMZ+20AJCqfwYQY |
||||
9HlCB9SQeCRUtU/fHkAZLPApze6C7a1r0LVIuM6iolWyha5KJ++mj84fAagwy/ag |
||||
8TU8kHTLSGPYeg5G/TAbr1XU5kbbqfWfQFMK1xtdZd1BaGP2cDC2QGkr2ot1SLkC |
||||
DQRU+LJ5ARAArDftuFPE+ZhgJRuJK163fsD15aHPfv5s+h8kPFv0AuwVs+D75w3y |
||||
YGfaRtlwSvK+8EucKOoHI1AQYjTG0dtKJuwEGhQ2qsTWUKe05tEAWu0eN62MOZ/r |
||||
Awjxqotj4TeFksfyKedVAYSizD0Xj16fizeWFrfUBNND4OgUgD8KM79oRchtzKBE |
||||
HRBP27JksU8tQWc4YcEJUHV66Pji5OCiXxHXJ+JpqKSKeCrVvrvro+pwsY1I3ARA |
||||
F4UmLxCcb4GnNq+s76cb2K7XJtWJu5FHeHOsef5ped43pYs35UXI+EvOYNs39XI4 |
||||
emMsI0KmuLME2LHO3CJNBirwRFxui27axZk/CSVE1lglnbb25n3QHvbs/31ASCCT |
||||
QKZ7+Gce89iow6yG4MkN5W4hLdkGAyNI74b6yAUfugSqPLNSj3YHvVFY3y1acge+ |
||||
H7xDO/owRN1kbz+9VMJZxsxB/oZEyEVAE0szHxXbMBhqOME0Y3O6UBrXr7z6R8NG |
||||
S20RPet4kxCCTLZOvM/X5FtvimgR2u5qRPHs+zf2VPXIRsJsM3zq9EvmePryGM3r |
||||
1rEAvYagukuyt68lOWgKP/2wB0/NIFAs69b1QSJS3U4CQVIs2h84Ucvbh9gX9Y0B |
||||
LbV5mxvDDfC/4Nhf4yMfH/CwZDLOUsaRAjCv/lQuN9mnMz9aYnsPha0AEQEAAYkC |
||||
HwQYAQoACQUCVPiyeQIbDAAKCRAXVlcy4I5eQec+EACi14L8Vp7tw3tDm/Lrb9fM |
||||
LHfoOnZiDCGaXhiXqckbTSogp7hU82m1fIy4VwY7DWbs1iIq7QdDJMBuNn174Qd3 |
||||
ZPxHeGwBbR04gEsHkbjXBAA5hMacLvmxYFiPlibz+AO4orUiYu/vlEXhXoFCjSlB |
||||
pw0kUG8W8yQ/RyE7ryLv5/bT4LkwUWF7/+gdDzLUy1VeaPDKmBupKVSbEACe4QRH |
||||
dUUqE3suKoJ/GylO2sGtFW8BM7+CffX+nvc8hJWzXdYW5InSh0omYJIypIgnQ1gM |
||||
MhUdu4gbtYwo44Tlax2mTSg8vSVboYO6pBZVX3IEUnjRHLOCZVZIBFXIFdRrHXO8 |
||||
TTkzx9ZoDmZ/DH+Md1NDnS4QsvFbRO/EeDRQAI4cgGhCc4CTrrJSQv8jtl7x8OTx |
||||
fnDUbE/n8pLV93j9t1Gd07h0VJSmYj3AR7PiefHS7s2yxS9oOqRayGBqrJFzd2gS |
||||
+oXvUBC6pUvM68NgNVCKH7HmIM9tFbqgy8kofTsVDkq9TEJRO+X4hn7UDNJhTjVE |
||||
AVRUdku6CJR6wj3RPCbERSNB8uabuv1lgo41baeepLn+tJNO/4hilJ0zvEoryVnJ |
||||
ldZ73mHRRRtXoPRXq7OKuDn10AvtYX8y3/q5z6XhLUePFKM91PO8GF0J6bNWrQSq |
||||
Khvd4+XHE/ecjLOPvLweAg== |
||||
=+hz7 |
||||
-----END PGP PUBLIC KEY BLOCK----- |
@ -1,45 +0,0 @@
@@ -1,45 +0,0 @@
|
||||
--- |
||||
name: bitcoin |
||||
urls: |
||||
- http://bitcoin.org/bitcoin-latest-linux-gitian.zip |
||||
rss: |
||||
- url: |
||||
xpath: //item/link/text() |
||||
pattern: bitcoin-\d+.\d+.\d+-linux-gitian.zip |
||||
signers: |
||||
0A82509767C7D4A5D14DA2301AE1D35043E08E54: |
||||
name: BlueMatt |
||||
key: bluematt |
||||
BF6273FAEF7CC0BA1F562E50989F6B3048A116B5: |
||||
name: Devrandom |
||||
key: devrandom |
||||
E463A93F5F3117EEDE6C7316BD02942421F4889F: |
||||
name: Luke-Jr |
||||
key: luke-jr |
||||
D762373D24904A3E42F33B08B9A408E71DAAC974: |
||||
name: "Pieter Wuille" |
||||
key: sipa |
||||
77E72E69DA7EE0A148C06B21B34821D4944DE5F7: |
||||
name: tcatm |
||||
key: tcatm |
||||
01CDF4627A3B88AAE4A571C87588242FBE38D3A8: |
||||
name: "Gavin Andresen" |
||||
key: gavinandresen |
||||
71A3B16735405025D447E8F274810B012346C9A6: |
||||
name: "Wladimir J. van der Laan" |
||||
key: laanwj |
||||
AEC1884398647C47413C1C3FB1179EB7347DC10D: |
||||
name: "Warren Togami" |
||||
key: wtogami |
||||
9692B91BBF0E8D34DFD33B1882C5C009628ECF0C: |
||||
name: michagogo |
||||
key: michagogo |
||||
E944AE667CF960B1004BC32FCA662BE18B877A60: |
||||
name: "Andreas Schildbach" |
||||
key: aschildbach |
||||
C060A6635913D98A3587D7DB1C2491FFEB0EF770: |
||||
name: "Cory Fields" |
||||
key: "cfields" |
||||
37EC7D7B0A217CDB4B4E007E7FAB114267E4FA04: |
||||
name: "Peter Todd" |
||||
key: "petertodd" |
@ -1,45 +0,0 @@
@@ -1,45 +0,0 @@
|
||||
--- |
||||
name: bitcoin |
||||
urls: |
||||
- http://bitcoin.org/bitcoin-latest-win32-gitian.zip |
||||
rss: |
||||
- url: |
||||
xpath: //item/link/text() |
||||
pattern: bitcoin-\d+.\d+.\d+-win32-gitian.zip |
||||
signers: |
||||
0A82509767C7D4A5D14DA2301AE1D35043E08E54: |
||||
name: BlueMatt |
||||
key: bluematt |
||||
BF6273FAEF7CC0BA1F562E50989F6B3048A116B5: |
||||
name: Devrandom |
||||
key: devrandom |
||||
E463A93F5F3117EEDE6C7316BD02942421F4889F: |
||||
name: Luke-Jr |
||||
key: luke-jr |
||||
D762373D24904A3E42F33B08B9A408E71DAAC974: |
||||
name: "Pieter Wuille" |
||||
key: sipa |
||||
77E72E69DA7EE0A148C06B21B34821D4944DE5F7: |
||||
name: tcatm |
||||
key: tcatm |
||||
01CDF4627A3B88AAE4A571C87588242FBE38D3A8: |
||||
name: "Gavin Andresen" |
||||
key: gavinandresen |
||||
71A3B16735405025D447E8F274810B012346C9A6: |
||||
name: "Wladimir J. van der Laan" |
||||
key: laanwj |
||||
AEC1884398647C47413C1C3FB1179EB7347DC10D: |
||||
name: "Warren Togami" |
||||
key: wtogami |
||||
9692B91BBF0E8D34DFD33B1882C5C009628ECF0C: |
||||
name: michagogo |
||||
key: michagogo |
||||
E944AE667CF960B1004BC32FCA662BE18B877A60: |
||||
name: "Andreas Schildbach" |
||||
key: aschildbach |
||||
C060A6635913D98A3587D7DB1C2491FFEB0EF770: |
||||
name: "Cory Fields" |
||||
key: "cfields" |
||||
37EC7D7B0A217CDB4B4E007E7FAB114267E4FA04: |
||||
name: "Peter Todd" |
||||
key: "petertodd" |
@ -1,18 +1,18 @@
@@ -1,18 +1,18 @@
|
||||
Compiling/running unit tests |
||||
------------------------------------ |
||||
|
||||
Unit tests will be automatically compiled if dependencies were met in configure |
||||
Unit tests will be automatically compiled if dependencies were met in `./configure` |
||||
and tests weren't explicitly disabled. |
||||
|
||||
After configuring, they can be run with 'make check'. |
||||
After configuring, they can be run with `make check`. |
||||
|
||||
To run the bitcoind tests manually, launch src/test/test_bitcoin . |
||||
To run the bitcoind tests manually, launch `src/test/test_bitcoin`. |
||||
|
||||
To add more bitcoind tests, add `BOOST_AUTO_TEST_CASE` functions to the existing |
||||
.cpp files in the test/ directory or add new .cpp files that |
||||
.cpp files in the `test/` directory or add new .cpp files that |
||||
implement new BOOST_AUTO_TEST_SUITE sections. |
||||
|
||||
To run the bitcoin-qt tests manually, launch src/qt/test/test_bitcoin-qt |
||||
To run the bitcoin-qt tests manually, launch `src/qt/test/test_bitcoin-qt` |
||||
|
||||
To add more bitcoin-qt tests, add them to the `src/qt/test/` directory and |
||||
the `src/qt/test/test_main.cpp` file. |
||||
|
@ -0,0 +1,153 @@
@@ -0,0 +1,153 @@
|
||||
#!/usr/bin/env python2 |
||||
# Copyright (c) 2014-2015 The Bitcoin Core developers |
||||
# Distributed under the MIT software license, see the accompanying |
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. |
||||
|
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework |
||||
from test_framework.util import * |
||||
try: |
||||
import urllib.parse as urlparse |
||||
except ImportError: |
||||
import urlparse |
||||
|
||||
class AbandonConflictTest(BitcoinTestFramework): |
||||
|
||||
def setup_network(self): |
||||
self.nodes = [] |
||||
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"])) |
||||
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-logtimemicros"])) |
||||
connect_nodes(self.nodes[0], 1) |
||||
|
||||
def run_test(self): |
||||
self.nodes[1].generate(100) |
||||
sync_blocks(self.nodes) |
||||
balance = self.nodes[0].getbalance() |
||||
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) |
||||
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) |
||||
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) |
||||
sync_mempools(self.nodes) |
||||
self.nodes[1].generate(1) |
||||
|
||||
sync_blocks(self.nodes) |
||||
newbalance = self.nodes[0].getbalance() |
||||
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost |
||||
balance = newbalance |
||||
|
||||
url = urlparse.urlparse(self.nodes[1].url) |
||||
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1))) |
||||
|
||||
# Identify the 10btc outputs |
||||
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10")) |
||||
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10")) |
||||
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10")) |
||||
|
||||
inputs =[] |
||||
# spend 10btc outputs from txA and txB |
||||
inputs.append({"txid":txA, "vout":nA}) |
||||
inputs.append({"txid":txB, "vout":nB}) |
||||
outputs = {} |
||||
|
||||
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998") |
||||
outputs[self.nodes[1].getnewaddress()] = Decimal("5") |
||||
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs)) |
||||
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"]) |
||||
|
||||
# Identify the 14.99998btc output |
||||
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998")) |
||||
|
||||
#Create a child tx spending AB1 and C |
||||
inputs = [] |
||||
inputs.append({"txid":txAB1, "vout":nAB}) |
||||
inputs.append({"txid":txC, "vout":nC}) |
||||
outputs = {} |
||||
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996") |
||||
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs)) |
||||
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"]) |
||||
|
||||
# In mempool txs from self should increase balance from change |
||||
newbalance = self.nodes[0].getbalance() |
||||
assert(newbalance == balance - Decimal("30") + Decimal("24.9996")) |
||||
balance = newbalance |
||||
|
||||
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool |
||||
# TODO: redo with eviction |
||||
# Note had to make sure tx did not have AllowFree priority |
||||
stop_node(self.nodes[0],0) |
||||
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"]) |
||||
|
||||
# Verify txs no longer in mempool |
||||
assert(len(self.nodes[0].getrawmempool()) == 0) |
||||
|
||||
# Not in mempool txs from self should only reduce balance |
||||
# inputs are still spent, but change not received |
||||
newbalance = self.nodes[0].getbalance() |
||||
assert(newbalance == balance - Decimal("24.9996")) |
||||
balance = newbalance |
||||
|
||||
# Abandon original transaction and verify inputs are available again |
||||
# including that the child tx was also abandoned |
||||
self.nodes[0].abandontransaction(txAB1) |
||||
newbalance = self.nodes[0].getbalance() |
||||
assert(newbalance == balance + Decimal("30")) |
||||
balance = newbalance |
||||
|
||||
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned |
||||
stop_node(self.nodes[0],0) |
||||
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"]) |
||||
assert(len(self.nodes[0].getrawmempool()) == 0) |
||||
assert(self.nodes[0].getbalance() == balance) |
||||
|
||||
# But if its received again then it is unabandoned |
||||
# And since now in mempool, the change is available |
||||
# But its child tx remains abandoned |
||||
self.nodes[0].sendrawtransaction(signed["hex"]) |
||||
newbalance = self.nodes[0].getbalance() |
||||
assert(newbalance == balance - Decimal("20") + Decimal("14.99998")) |
||||
balance = newbalance |
||||
|
||||
# Send child tx again so its unabandoned |
||||
self.nodes[0].sendrawtransaction(signed2["hex"]) |
||||
newbalance = self.nodes[0].getbalance() |
||||
assert(newbalance == balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996")) |
||||
balance = newbalance |
||||
|
||||
# Remove using high relay fee again |
||||
stop_node(self.nodes[0],0) |
||||
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"]) |
||||
assert(len(self.nodes[0].getrawmempool()) == 0) |
||||
newbalance = self.nodes[0].getbalance() |
||||
assert(newbalance == balance - Decimal("24.9996")) |
||||
balance = newbalance |
||||
|
||||
# Create a double spend of AB1 by spending again from only A's 10 output |
||||
# Mine double spend from node 1 |
||||
inputs =[] |
||||
inputs.append({"txid":txA, "vout":nA}) |
||||
outputs = {} |
||||
outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999") |
||||
tx = self.nodes[0].createrawtransaction(inputs, outputs) |
||||
signed = self.nodes[0].signrawtransaction(tx) |
||||
self.nodes[1].sendrawtransaction(signed["hex"]) |
||||
self.nodes[1].generate(1) |
||||
|
||||
connect_nodes(self.nodes[0], 1) |
||||
sync_blocks(self.nodes) |
||||
|
||||
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted |
||||
newbalance = self.nodes[0].getbalance() |
||||
assert(newbalance == balance + Decimal("20")) |
||||
balance = newbalance |
||||
|
||||
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315 |
||||
# Invalidate the block with the double spend and B's 10 BTC output should no longer be available |
||||
# Don't think C's should either |
||||
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) |
||||
newbalance = self.nodes[0].getbalance() |
||||
#assert(newbalance == balance - Decimal("10")) |
||||
print "If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer" |
||||
print "conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315" |
||||
print balance , " -> " , newbalance , " ?" |
||||
|
||||
if __name__ == '__main__': |
||||
AbandonConflictTest().main() |
@ -0,0 +1,72 @@
@@ -0,0 +1,72 @@
|
||||
#!/usr/bin/env python2 |
||||
# Copyright (c) 2015 The Bitcoin Core developers |
||||
# Distributed under the MIT/X11 software license, see the accompanying |
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. |
||||
# |
||||
|
||||
from test_framework.test_framework import ComparisonTestFramework |
||||
from test_framework.comptool import TestManager, TestInstance, RejectResult |
||||
from test_framework.blocktools import * |
||||
import time |
||||
|
||||
|
||||
''' |
||||
In this test we connect to one node over p2p, and test tx requests. |
||||
''' |
||||
|
||||
# Use the ComparisonTestFramework with 1 node: only use --testbinary. |
||||
class InvalidTxRequestTest(ComparisonTestFramework): |
||||
|
||||
''' Can either run this test as 1 node with expected answers, or two and compare them. |
||||
Change the "outcome" variable from each TestInstance object to only do the comparison. ''' |
||||
def __init__(self): |
||||
self.num_nodes = 1 |
||||
|
||||
def run_test(self): |
||||
test = TestManager(self, self.options.tmpdir) |
||||
test.add_all_connections(self.nodes) |
||||
self.tip = None |
||||
self.block_time = None |
||||
NetworkThread().start() # Start up network handling in another thread |
||||
test.run() |
||||
|
||||
def get_tests(self): |
||||
if self.tip is None: |
||||
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0) |
||||
self.block_time = int(time.time())+1 |
||||
|
||||
''' |
||||
Create a new block with an anyone-can-spend coinbase |
||||
''' |
||||
height = 1 |
||||
block = create_block(self.tip, create_coinbase(height), self.block_time) |
||||
self.block_time += 1 |
||||
block.solve() |
||||
# Save the coinbase for later |
||||
self.block1 = block |
||||
self.tip = block.sha256 |
||||
height += 1 |
||||
yield TestInstance([[block, True]]) |
||||
|
||||
''' |
||||
Now we need that block to mature so we can spend the coinbase. |
||||
''' |
||||
test = TestInstance(sync_every_block=False) |
||||
for i in xrange(100): |
||||
block = create_block(self.tip, create_coinbase(height), self.block_time) |
||||
block.solve() |
||||
self.tip = block.sha256 |
||||
self.block_time += 1 |
||||
test.blocks_and_transactions.append([block, True]) |
||||
height += 1 |
||||
yield test |
||||
|
||||
# chr(100) is OP_NOTIF |
||||
# Transaction will be rejected with code 16 (REJECT_INVALID) |
||||
tx1 = create_transaction(self.block1.vtx[0], 0, chr(100), 50*100000000 - 12000) |
||||
yield TestInstance([[tx1, RejectResult(16, 'mandatory-script-verify-flag-failed')]]) |
||||
|
||||
# TODO: test further transactions... |
||||
|
||||
if __name__ == '__main__': |
||||
InvalidTxRequestTest().main() |
@ -0,0 +1,55 @@
@@ -0,0 +1,55 @@
|
||||
#!/usr/bin/env python2 |
||||
# Copyright (c) 2014-2015 The Bitcoin Core developers |
||||
# Distributed under the MIT software license, see the accompanying |
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. |
||||
|
||||
# Test mempool limiting together/eviction with the wallet |
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework |
||||
from test_framework.util import * |
||||
|
||||
class MempoolLimitTest(BitcoinTestFramework): |
||||
|
||||
def __init__(self): |
||||
self.txouts = gen_return_txouts() |
||||
|
||||
def setup_network(self): |
||||
self.nodes = [] |
||||
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxmempool=5", "-spendzeroconfchange=0", "-debug"])) |
||||
self.is_network_split = False |
||||
self.sync_all() |
||||
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] |
||||
|
||||
def setup_chain(self): |
||||
print("Initializing test directory "+self.options.tmpdir) |
||||
initialize_chain_clean(self.options.tmpdir, 2) |
||||
|
||||
def run_test(self): |
||||
txids = [] |
||||
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 90) |
||||
|
||||
#create a mempool tx that will be evicted |
||||
us0 = utxos.pop() |
||||
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}] |
||||
outputs = {self.nodes[0].getnewaddress() : 0.0001} |
||||
tx = self.nodes[0].createrawtransaction(inputs, outputs) |
||||
self.nodes[0].settxfee(self.relayfee) # specifically fund this tx with low fee |
||||
txF = self.nodes[0].fundrawtransaction(tx) |
||||
self.nodes[0].settxfee(0) # return to automatic fee selection |
||||
txFS = self.nodes[0].signrawtransaction(txF['hex']) |
||||
txid = self.nodes[0].sendrawtransaction(txFS['hex']) |
||||
self.nodes[0].lockunspent(True, [us0]) |
||||
|
||||
relayfee = self.nodes[0].getnetworkinfo()['relayfee'] |
||||
base_fee = relayfee*100 |
||||
for i in xrange (4): |
||||
txids.append([]) |
||||
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], (i+1)*base_fee) |
||||
|
||||
# by now, the tx should be evicted, check confirmation state |
||||
assert(txid not in self.nodes[0].getrawmempool()) |
||||
txdata = self.nodes[0].gettransaction(txid) |
||||
assert(txdata['confirmations'] == 0) #confirmation should still be 0 |
||||
|
||||
if __name__ == '__main__': |
||||
MempoolLimitTest().main() |
@ -0,0 +1,122 @@
@@ -0,0 +1,122 @@
|
||||
#!/usr/bin/env python2 |
||||
# Copyright (c) 2015 The Bitcoin Core developers |
||||
# Distributed under the MIT software license, see the accompanying |
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. |
||||
|
||||
# |
||||
# Test mulitple rpc user config option rpcauth |
||||
# |
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework |
||||
from test_framework.util import * |
||||
import base64 |
||||
|
||||
try: |
||||
import http.client as httplib |
||||
except ImportError: |
||||
import httplib |
||||
try: |
||||
import urllib.parse as urlparse |
||||
except ImportError: |
||||
import urlparse |
||||
|
||||
class HTTPBasicsTest (BitcoinTestFramework): |
||||
def setup_nodes(self): |
||||
return start_nodes(4, self.options.tmpdir) |
||||
|
||||
def setup_chain(self): |
||||
print("Initializing test directory "+self.options.tmpdir) |
||||
initialize_chain(self.options.tmpdir) |
||||
#Append rpcauth to bitcoin.conf before initialization |
||||
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144" |
||||
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e" |
||||
with open(os.path.join(self.options.tmpdir+"/node0", "bitcoin.conf"), 'a') as f: |
||||
f.write(rpcauth+"\n") |
||||
f.write(rpcauth2+"\n") |
||||
|
||||
def run_test(self): |
||||
|
||||
################################################## |
||||
# Check correctness of the rpcauth config option # |
||||
################################################## |
||||
url = urlparse.urlparse(self.nodes[0].url) |
||||
|
||||
#Old authpair |
||||
authpair = url.username + ':' + url.password |
||||
|
||||
#New authpair generated via contrib/rpcuser tool |
||||
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144" |
||||
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM=" |
||||
|
||||
#Second authpair with different username |
||||
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e" |
||||
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI=" |
||||
authpairnew = "rt:"+password |
||||
|
||||
headers = {"Authorization": "Basic " + base64.b64encode(authpair)} |
||||
|
||||
conn = httplib.HTTPConnection(url.hostname, url.port) |
||||
conn.connect() |
||||
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) |
||||
resp = conn.getresponse() |
||||
assert_equal(resp.status==401, False) |
||||
conn.close() |
||||
|
||||
#Use new authpair to confirm both work |
||||
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)} |
||||
|
||||
conn = httplib.HTTPConnection(url.hostname, url.port) |
||||
conn.connect() |
||||
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) |
||||
resp = conn.getresponse() |
||||
assert_equal(resp.status==401, False) |
||||
conn.close() |
||||
|
||||
#Wrong login name with rt's password |
||||
authpairnew = "rtwrong:"+password |
||||
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)} |
||||
|
||||
conn = httplib.HTTPConnection(url.hostname, url.port) |
||||
conn.connect() |
||||
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) |
||||
resp = conn.getresponse() |
||||
assert_equal(resp.status==401, True) |
||||
conn.close() |
||||
|
||||
#Wrong password for rt |
||||
authpairnew = "rt:"+password+"wrong" |
||||
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)} |
||||
|
||||
conn = httplib.HTTPConnection(url.hostname, url.port) |
||||
conn.connect() |
||||
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) |
||||
resp = conn.getresponse() |
||||
assert_equal(resp.status==401, True) |
||||
conn.close() |
||||
|
||||
#Correct for rt2 |
||||
authpairnew = "rt2:"+password2 |
||||
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)} |
||||
|
||||
conn = httplib.HTTPConnection(url.hostname, url.port) |
||||
conn.connect() |
||||
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) |
||||
resp = conn.getresponse() |
||||
assert_equal(resp.status==401, False) |
||||
conn.close() |
||||
|
||||
#Wrong password for rt2 |
||||
authpairnew = "rt2:"+password2+"wrong" |
||||
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)} |
||||
|
||||
conn = httplib.HTTPConnection(url.hostname, url.port) |
||||
conn.connect() |
||||
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) |
||||
resp = conn.getresponse() |
||||
assert_equal(resp.status==401, True) |
||||
conn.close() |
||||
|
||||
|
||||
|
||||
if __name__ == '__main__': |
||||
HTTPBasicsTest ().main () |
@ -0,0 +1,126 @@
@@ -0,0 +1,126 @@
|
||||
#!/usr/bin/env python2 |
||||
# Copyright (c) 2015 The Bitcoin Core developers |
||||
# Distributed under the MIT software license, see the accompanying |
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. |
||||
|
||||
# |
||||
# Test PrioritiseTransaction code |
||||
# |
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework |
||||
from test_framework.util import * |
||||
|
||||
COIN = 100000000 |
||||
|
||||
class PrioritiseTransactionTest(BitcoinTestFramework): |
||||
|
||||
def __init__(self): |
||||
self.txouts = gen_return_txouts() |
||||
|
||||
def setup_chain(self): |
||||
print("Initializing test directory "+self.options.tmpdir) |
||||
initialize_chain_clean(self.options.tmpdir, 1) |
||||
|
||||
def setup_network(self): |
||||
self.nodes = [] |
||||
self.is_network_split = False |
||||
|
||||
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-printpriority=1"])) |
||||
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] |
||||
|
||||
def run_test(self): |
||||
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 90) |
||||
base_fee = self.relayfee*100 # our transactions are smaller than 100kb |
||||
txids = [] |
||||
|
||||
# Create 3 batches of transactions at 3 different fee rate levels |
||||
for i in xrange(3): |
||||
txids.append([]) |
||||
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], (i+1)*base_fee) |
||||
|
||||
# add a fee delta to something in the cheapest bucket and make sure it gets mined |
||||
# also check that a different entry in the cheapest bucket is NOT mined (lower |
||||
# the priority to ensure its not mined due to priority) |
||||
self.nodes[0].prioritisetransaction(txids[0][0], 0, int(3*base_fee*COIN)) |
||||
self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0) |
||||
|
||||
self.nodes[0].generate(1) |
||||
|
||||
mempool = self.nodes[0].getrawmempool() |
||||
print "Assert that prioritised transasction was mined" |
||||
assert(txids[0][0] not in mempool) |
||||
assert(txids[0][1] in mempool) |
||||
|
||||
high_fee_tx = None |
||||
for x in txids[2]: |
||||
if x not in mempool: |
||||
high_fee_tx = x |
||||
|
||||
# Something high-fee should have been mined! |
||||
assert(high_fee_tx != None) |
||||
|
||||
# Add a prioritisation before a tx is in the mempool (de-prioritising a |
||||
# high-fee transaction). |
||||
self.nodes[0].prioritisetransaction(high_fee_tx, -1e15, -int(2*base_fee*COIN)) |
||||
|
||||
# Add everything back to mempool |
||||
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) |
||||
|
||||
# Check to make sure our high fee rate tx is back in the mempool |
||||
mempool = self.nodes[0].getrawmempool() |
||||
assert(high_fee_tx in mempool) |
||||
|
||||
# Now verify the high feerate transaction isn't mined. |
||||
self.nodes[0].generate(5) |
||||
|
||||
# High fee transaction should not have been mined, but other high fee rate |
||||
# transactions should have been. |
||||
mempool = self.nodes[0].getrawmempool() |
||||
print "Assert that de-prioritised transaction is still in mempool" |
||||
assert(high_fee_tx in mempool) |
||||
for x in txids[2]: |
||||
if (x != high_fee_tx): |
||||
assert(x not in mempool) |
||||
|
||||
# Create a free, low priority transaction. Should be rejected. |
||||
utxo_list = self.nodes[0].listunspent() |
||||
assert(len(utxo_list) > 0) |
||||
utxo = utxo_list[0] |
||||
|
||||
inputs = [] |
||||
outputs = {} |
||||
inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]}) |
||||
outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee |
||||
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) |
||||
tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"] |
||||
txid = self.nodes[0].sendrawtransaction(tx_hex) |
||||
|
||||
# A tx that spends an in-mempool tx has 0 priority, so we can use it to |
||||
# test the effect of using prioritise transaction for mempool acceptance |
||||
inputs = [] |
||||
inputs.append({"txid": txid, "vout": 0}) |
||||
outputs = {} |
||||
outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee |
||||
raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs) |
||||
tx2_hex = self.nodes[0].signrawtransaction(raw_tx2)["hex"] |
||||
tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"] |
||||
|
||||
try: |
||||
self.nodes[0].sendrawtransaction(tx2_hex) |
||||
except JSONRPCException as exp: |
||||
assert_equal(exp.error['code'], -26) # insufficient fee |
||||
assert(tx2_id not in self.nodes[0].getrawmempool()) |
||||
else: |
||||
assert(False) |
||||
|
||||
# This is a less than 1000-byte transaction, so just set the fee |
||||
# to be the minimum for a 1000 byte transaction and check that it is |
||||
# accepted. |
||||
self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN)) |
||||
|
||||
print "Assert that prioritised free transaction is accepted to mempool" |
||||
assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id) |
||||
assert(tx2_id in self.nodes[0].getrawmempool()) |
||||
|
||||
if __name__ == '__main__': |
||||
PrioritiseTransactionTest().main() |
@ -0,0 +1,592 @@
@@ -0,0 +1,592 @@
|
||||
#!/usr/bin/env python2 |
||||
# Copyright (c) 2014-2015 The Bitcoin Core developers |
||||
# Distributed under the MIT software license, see the accompanying |
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. |
||||
|
||||
# |
||||
# Test replace by fee code |
||||
# |
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework |
||||
from test_framework.util import * |
||||
from test_framework.script import * |
||||
from test_framework.mininode import * |
||||
import binascii |
||||
|
||||
COIN = 100000000 |
||||
MAX_REPLACEMENT_LIMIT = 100 |
||||
|
||||
def satoshi_round(amount): |
||||
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) |
||||
|
||||
def txToHex(tx): |
||||
return binascii.hexlify(tx.serialize()).decode('utf-8') |
||||
|
||||
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])): |
||||
"""Create a txout with a given amount and scriptPubKey |
||||
|
||||
Mines coins as needed. |
||||
|
||||
confirmed - txouts created will be confirmed in the blockchain; |
||||
unconfirmed otherwise. |
||||
""" |
||||
fee = 1*COIN |
||||
while node.getbalance() < satoshi_round((amount + fee)/COIN): |
||||
node.generate(100) |
||||
#print (node.getbalance(), amount, fee) |
||||
|
||||
new_addr = node.getnewaddress() |
||||
#print new_addr |
||||
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN)) |
||||
tx1 = node.getrawtransaction(txid, 1) |
||||
txid = int(txid, 16) |
||||
i = None |
||||
|
||||
for i, txout in enumerate(tx1['vout']): |
||||
#print i, txout['scriptPubKey']['addresses'] |
||||
if txout['scriptPubKey']['addresses'] == [new_addr]: |
||||
#print i |
||||
break |
||||
assert i is not None |
||||
|
||||
tx2 = CTransaction() |
||||
tx2.vin = [CTxIn(COutPoint(txid, i))] |
||||
tx2.vout = [CTxOut(amount, scriptPubKey)] |
||||
tx2.rehash() |
||||
|
||||
binascii.hexlify(tx2.serialize()).decode('utf-8') |
||||
|
||||
signed_tx = node.signrawtransaction(binascii.hexlify(tx2.serialize()).decode('utf-8')) |
||||
|
||||
txid = node.sendrawtransaction(signed_tx['hex'], True) |
||||
|
||||
# If requested, ensure txouts are confirmed. |
||||
if confirmed: |
||||
mempool_size = len(node.getrawmempool()) |
||||
while mempool_size > 0: |
||||
node.generate(1) |
||||
new_size = len(node.getrawmempool()) |
||||
# Error out if we have something stuck in the mempool, as this |
||||
# would likely be a bug. |
||||
assert(new_size < mempool_size) |
||||
mempool_size = new_size |
||||
|
||||
return COutPoint(int(txid, 16), 0) |
||||
|
||||
class ReplaceByFeeTest(BitcoinTestFramework): |
||||
|
||||
def setup_network(self): |
||||
self.nodes = [] |
||||
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug", |
||||
"-relaypriority=0", "-whitelist=127.0.0.1", |
||||
"-limitancestorcount=50", |
||||
"-limitancestorsize=101", |
||||
"-limitdescendantcount=200", |
||||
"-limitdescendantsize=101" |
||||
])) |
||||
self.is_network_split = False |
||||
|
||||
def run_test(self): |
||||
make_utxo(self.nodes[0], 1*COIN) |
||||
|
||||
print "Running test simple doublespend..." |
||||
self.test_simple_doublespend() |
||||
|
||||
print "Running test doublespend chain..." |
||||
self.test_doublespend_chain() |
||||
|
||||
print "Running test doublespend tree..." |
||||
self.test_doublespend_tree() |
||||
|
||||
print "Running test replacement feeperkb..." |
||||
self.test_replacement_feeperkb() |
||||
|
||||
print "Running test spends of conflicting outputs..." |
||||
self.test_spends_of_conflicting_outputs() |
||||
|
||||
print "Running test new unconfirmed inputs..." |
||||
self.test_new_unconfirmed_inputs() |
||||
|
||||
print "Running test too many replacements..." |
||||
self.test_too_many_replacements() |
||||
|
||||
print "Running test opt-in..." |
||||
self.test_opt_in() |
||||
|
||||
print "Running test prioritised transactions..." |
||||
self.test_prioritised_transactions() |
||||
|
||||
print "Passed\n" |
||||
|
||||
def test_simple_doublespend(self): |
||||
"""Simple doublespend""" |
||||
tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN) |
||||
|
||||
tx1a = CTransaction() |
||||
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] |
||||
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] |
||||
tx1a_hex = txToHex(tx1a) |
||||
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) |
||||
|
||||
# Should fail because we haven't changed the fee |
||||
tx1b = CTransaction() |
||||
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] |
||||
tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))] |
||||
tx1b_hex = txToHex(tx1b) |
||||
|
||||
try: |
||||
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) |
||||
except JSONRPCException as exp: |
||||
assert_equal(exp.error['code'], -26) # insufficient fee |
||||
else: |
||||
assert(False) |
||||
|
||||
# Extra 0.1 BTC fee |
||||
tx1b = CTransaction() |
||||
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] |
||||
tx1b.vout = [CTxOut(0.9*COIN, CScript([b'b']))] |
||||
tx1b_hex = txToHex(tx1b) |
||||
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) |
||||
|
||||
mempool = self.nodes[0].getrawmempool() |
||||
|
||||
assert (tx1a_txid not in mempool) |
||||
assert (tx1b_txid in mempool) |
||||
|
||||
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid)) |
||||
|
||||
    def test_doublespend_chain(self):
        """Doublespend of a long chain"""

        initial_nValue = 50*COIN
        tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)

        # Build a linear chain of 1-in/1-out transactions, each paying a
        # 1 BTC fee, until only 10 BTC of the initial value remains
        # (40 transactions, 40 BTC of cumulative fees).
        prevout = tx0_outpoint
        remaining_value = initial_nValue
        chain_txids = []
        while remaining_value > 10*COIN:
            remaining_value -= 1*COIN
            tx = CTransaction()
            tx.vin = [CTxIn(prevout, nSequence=0)]
            tx.vout = [CTxOut(remaining_value, CScript([1]))]
            tx_hex = txToHex(tx)
            txid = self.nodes[0].sendrawtransaction(tx_hex, True)
            chain_txids.append(txid)
            # Each new tx spends output 0 of the previous one.
            prevout = COutPoint(int(txid, 16), 0)

        # Whether the double-spend is allowed is evaluated by including all
        # child fees - 40 BTC - so this attempt is rejected.
        # (Its fee is 30 BTC, less than the 40 BTC it would evict.)
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)

        try:
            self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
        else:
            assert(False) # transaction mistakenly accepted!

        # Accepted with sufficient fee (49 BTC fee > 40 BTC being evicted).
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        self.nodes[0].sendrawtransaction(dbl_tx_hex, True)

        # The entire conflicting chain must have been evicted.
        mempool = self.nodes[0].getrawmempool()
        for doublespent_txid in chain_txids:
            assert(doublespent_txid not in mempool)
||||
|
||||
    def test_doublespend_tree(self):
        """Doublespend of a big tree of transactions"""

        initial_nValue = 50*COIN
        tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)

        def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
            # Recursive generator: spends `prevout` into `tree_width` outputs,
            # submits the tx, yields it, then recurses into every output.
            # Stops once `max_txs` transactions exist or the per-output value
            # can no longer cover the fee.
            if _total_txs is None:
                # Mutable one-element list acts as a counter shared across
                # all recursive calls (a plain int would be rebound locally).
                _total_txs = [0]
            if _total_txs[0] >= max_txs:
                return

            txout_value = (initial_value - fee) // tree_width
            if txout_value < fee:
                return

            vout = [CTxOut(txout_value, CScript([i+1]))
                    for i in range(tree_width)]
            tx = CTransaction()
            tx.vin = [CTxIn(prevout, nSequence=0)]
            tx.vout = vout
            tx_hex = txToHex(tx)

            # Stay under the standard transaction size limit.
            assert(len(tx.serialize()) < 100000)
            txid = self.nodes[0].sendrawtransaction(tx_hex, True)
            yield tx
            _total_txs[0] += 1

            txid = int(txid, 16)

            for i, txout in enumerate(tx.vout):
                for x in branch(COutPoint(txid, i), txout_value,
                                max_txs,
                                tree_width=tree_width, fee=fee,
                                _total_txs=_total_txs):
                    yield x

        fee = 0.0001*COIN
        n = MAX_REPLACEMENT_LIMIT
        tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
        assert_equal(len(tree_txs), n)

        # Attempt double-spend, will fail because too little fee paid
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        try:
            self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
        else:
            assert(False)

        # 1 BTC fee is enough
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
        dbl_tx_hex = txToHex(dbl_tx)
        self.nodes[0].sendrawtransaction(dbl_tx_hex, True)

        mempool = self.nodes[0].getrawmempool()

        # Every transaction in the tree must have been evicted.
        for tx in tree_txs:
            tx.rehash()
            assert (tx.hash not in mempool)

        # Try again, but with more total transactions than the "max txs
        # double-spent at once" anti-DoS limit.
        for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
            fee = 0.0001*COIN
            tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
            tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
            assert_equal(len(tree_txs), n)

            # Fee is high enough; the rejection must come from the
            # replacement-count limit, not from the fee checks.
            dbl_tx = CTransaction()
            dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
            dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
            dbl_tx_hex = txToHex(dbl_tx)
            try:
                self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
            except JSONRPCException as exp:
                assert_equal(exp.error['code'], -26)
                assert_equal("too many potential replacements" in exp.error['message'], True)
            else:
                assert(False)

            # Rejected replacement: every tree tx must still be known to the
            # node (getrawtransaction raises if it is not).
            for tx in tree_txs:
                tx.rehash()
                self.nodes[0].getrawtransaction(tx.hash)
||||
|
||||
def test_replacement_feeperkb(self): |
||||
"""Replacement requires fee-per-KB to be higher""" |
||||
tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN) |
||||
|
||||
tx1a = CTransaction() |
||||
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] |
||||
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] |
||||
tx1a_hex = txToHex(tx1a) |
||||
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) |
||||
|
||||
# Higher fee, but the fee per KB is much lower, so the replacement is |
||||
# rejected. |
||||
tx1b = CTransaction() |
||||
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] |
||||
tx1b.vout = [CTxOut(0.001*COIN, CScript([b'a'*999000]))] |
||||
tx1b_hex = txToHex(tx1b) |
||||
|
||||
try: |
||||
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) |
||||
except JSONRPCException as exp: |
||||
assert_equal(exp.error['code'], -26) # insufficient fee |
||||
else: |
||||
assert(False) |
||||
|
||||
    def test_spends_of_conflicting_outputs(self):
        """Replacements that spend conflicting tx outputs are rejected"""
        utxo1 = make_utxo(self.nodes[0], 1.2*COIN)
        utxo2 = make_utxo(self.nodes[0], 3.0*COIN)

        # Transaction that will be replaced.
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(utxo1, nSequence=0)]
        tx1a.vout = [CTxOut(1.1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        tx1a_txid = int(tx1a_txid, 16)

        # Direct spend an output of the transaction we're replacing.
        # (tx2 conflicts with tx1a via utxo1 AND spends tx1a's output.)
        tx2 = CTransaction()
        tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
        tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
        tx2.vout = tx1a.vout
        tx2_hex = txToHex(tx2)

        try:
            tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)

        # Spend tx1a's output to test the indirect case.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
        tx1b.vout = [CTxOut(1.0*COIN, CScript([b'a']))]
        tx1b_hex = txToHex(tx1b)
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        tx1b_txid = int(tx1b_txid, 16)

        # tx2 conflicts with tx1a (via utxo1) and also spends tx1b's output,
        # i.e. an output of a descendant of the transaction it would evict.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
                   CTxIn(COutPoint(tx1b_txid, 0))]
        tx2.vout = tx1a.vout
        tx2_hex = txToHex(tx2)

        try:
            tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)
||||
|
||||
def test_new_unconfirmed_inputs(self): |
||||
"""Replacements that add new unconfirmed inputs are rejected""" |
||||
confirmed_utxo = make_utxo(self.nodes[0], 1.1*COIN) |
||||
unconfirmed_utxo = make_utxo(self.nodes[0], 0.1*COIN, False) |
||||
|
||||
tx1 = CTransaction() |
||||
tx1.vin = [CTxIn(confirmed_utxo)] |
||||
tx1.vout = [CTxOut(1.0*COIN, CScript([b'a']))] |
||||
tx1_hex = txToHex(tx1) |
||||
tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True) |
||||
|
||||
tx2 = CTransaction() |
||||
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)] |
||||
tx2.vout = tx1.vout |
||||
tx2_hex = txToHex(tx2) |
||||
|
||||
try: |
||||
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True) |
||||
except JSONRPCException as exp: |
||||
assert_equal(exp.error['code'], -26) |
||||
else: |
||||
assert(False) |
||||
|
||||
    def test_too_many_replacements(self):
        """Replacements that evict too many transactions are rejected"""
        # Try directly replacing more than MAX_REPLACEMENT_LIMIT
        # transactions

        # Start by creating a single transaction with many outputs
        initial_nValue = 10*COIN
        utxo = make_utxo(self.nodes[0], initial_nValue)
        fee = 0.0001*COIN   # NOTE: float; later arithmetic yields floats -- verify CTxOut accepts them
        split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
        # actual_fee is computed but not used below; kept for reference.
        actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)

        outputs = []
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            outputs.append(CTxOut(split_value, CScript([1])))

        splitting_tx = CTransaction()
        splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
        splitting_tx.vout = outputs
        splitting_tx_hex = txToHex(splitting_tx)

        txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
        txid = int(txid, 16)

        # Now spend each of those outputs individually
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            tx_i = CTransaction()
            tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
            tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
            tx_i_hex = txToHex(tx_i)
            self.nodes[0].sendrawtransaction(tx_i_hex, True)

        # Now create doublespend of the whole lot; should fail.
        # Need a big enough fee to cover all spending transactions and have
        # a higher fee rate
        double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
        inputs = []
        for i in range(MAX_REPLACEMENT_LIMIT+1):
            inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
        double_tx = CTransaction()
        double_tx.vin = inputs
        double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
        double_tx_hex = txToHex(double_tx)

        # Replacing MAX_REPLACEMENT_LIMIT+1 transactions exceeds the
        # anti-DoS limit, so the node rejects it outright.
        try:
            self.nodes[0].sendrawtransaction(double_tx_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
            assert_equal("too many potential replacements" in exp.error['message'], True)
        else:
            assert(False)

        # If we remove an input, it should pass
        # (now exactly MAX_REPLACEMENT_LIMIT direct conflicts).
        double_tx = CTransaction()
        double_tx.vin = inputs[0:-1]
        double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
        double_tx_hex = txToHex(double_tx)
        self.nodes[0].sendrawtransaction(double_tx_hex, True)
||||
|
||||
    def test_opt_in(self):
        """ Replacing should only work if orig tx opted in """
        tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)

        # Create a non-opting in transaction
        # (nSequence=0xffffffff: input is final, no replaceability signal)
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        # Shouldn't be able to double-spend
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
        tx1b_hex = txToHex(tx1b)

        try:
            tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            print tx1b_txid
            assert(False)

        tx1_outpoint = make_utxo(self.nodes[0], 1.1*COIN)

        # Create a different non-opting in transaction
        # (0xfffffffe is still too high to signal opt-in replaceability)
        tx2a = CTransaction()
        tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
        tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx2a_hex = txToHex(tx2a)
        tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)

        # Still shouldn't be able to double-spend
        tx2b = CTransaction()
        tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
        tx2b_hex = txToHex(tx2b)

        try:
            tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)

        # Now create a new transaction that spends from tx1a and tx2a
        # opt-in on one of the inputs
        # Transaction should be replaceable on either input

        tx1a_txid = int(tx1a_txid, 16)
        tx2a_txid = int(tx2a_txid, 16)

        # Second input's nSequence=0xfffffffd signals opt-in for the
        # whole transaction.
        tx3a = CTransaction()
        tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
                    CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
        tx3a.vout = [CTxOut(0.9*COIN, CScript([b'c'])), CTxOut(0.9*COIN, CScript([b'd']))]
        tx3a_hex = txToHex(tx3a)

        self.nodes[0].sendrawtransaction(tx3a_hex, True)

        # tx3b replaces tx3a via its first (non-signaling) input...
        tx3b = CTransaction()
        tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
        tx3b.vout = [CTxOut(0.5*COIN, CScript([b'e']))]
        tx3b_hex = txToHex(tx3b)

        # ...and tx3c spends the other outpoint tx3a used.
        tx3c = CTransaction()
        tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
        tx3c.vout = [CTxOut(0.5*COIN, CScript([b'f']))]
        tx3c_hex = txToHex(tx3c)

        self.nodes[0].sendrawtransaction(tx3b_hex, True)
        # If tx3b was accepted, tx3c won't look like a replacement,
        # but make sure it is accepted anyway
        self.nodes[0].sendrawtransaction(tx3c_hex, True)
||||
|
||||
    def test_prioritised_transactions(self):
        """Fee deltas set via prioritisetransaction must be honoured by the
        replacement logic (both the feerate check and the absolute-fee
        check)."""
        # Ensure that fee deltas used via prioritisetransaction are
        # correctly used by replacement logic

        # 1. Check that feeperkb uses modified fees
        tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)

        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx1a_hex = txToHex(tx1a)
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)

        # Higher fee, but the actual fee per KB is much lower.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(0.001*COIN, CScript([b'a'*740000]))]
        tx1b_hex = txToHex(tx1b)

        # Verify tx1b cannot replace tx1a.
        try:
            tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)

        # Use prioritisetransaction to set tx1a's fee to 0.
        # (delta of -0.1 BTC cancels its 0.1 BTC fee)
        self.nodes[0].prioritisetransaction(tx1a_txid, 0, int(-0.1*COIN))

        # Now tx1b should be able to replace tx1a
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)

        assert(tx1b_txid in self.nodes[0].getrawmempool())

        # 2. Check that absolute fee checks use modified fee.
        tx1_outpoint = make_utxo(self.nodes[0], 1.1*COIN)

        tx2a = CTransaction()
        tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
        tx2a_hex = txToHex(tx2a)
        tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)

        # Lower fee, but we'll prioritise it
        tx2b = CTransaction()
        tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2b.vout = [CTxOut(1.01*COIN, CScript([b'a']))]
        tx2b.rehash()   # computes tx2b.hash, needed for prioritisetransaction below
        tx2b_hex = txToHex(tx2b)

        # Verify tx2b cannot replace tx2a.
        try:
            tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)
        else:
            assert(False)

        # Now prioritise tx2b to have a higher modified fee
        self.nodes[0].prioritisetransaction(tx2b.hash, 0, int(0.1*COIN))

        # tx2b should now be accepted
        tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)

        assert(tx2b_txid in self.nodes[0].getrawmempool())
||||
|
||||
if __name__ == '__main__':
    # Entry point: parses command-line options, starts the nodes and
    # invokes run_test().
    ReplaceByFeeTest().main()
@ -0,0 +1,517 @@
@@ -0,0 +1,517 @@
|
||||
#!/usr/bin/env python2 |
||||
# Copyright (c) 2014-2015 The Bitcoin Core developers |
||||
# Distributed under the MIT/X11 software license, see the accompanying |
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. |
||||
# |
||||
|
||||
from test_framework.mininode import * |
||||
from test_framework.test_framework import BitcoinTestFramework |
||||
from test_framework.util import * |
||||
from test_framework.blocktools import create_block, create_coinbase |
||||
|
||||
''' |
||||
SendHeadersTest -- test behavior of headers messages to announce blocks. |
||||
|
||||
Setup: |
||||
|
||||
- Two nodes, two p2p connections to node0. One p2p connection should only ever |
||||
receive inv's (omitted from testing description below, this is our control). |
||||
Second node is used for creating reorgs. |
||||
|
||||
Part 1: No headers announcements before "sendheaders" |
||||
a. node mines a block [expect: inv] |
||||
send getdata for the block [expect: block] |
||||
b. node mines another block [expect: inv] |
||||
send getheaders and getdata [expect: headers, then block] |
||||
c. node mines another block [expect: inv] |
||||
peer mines a block, announces with header [expect: getdata] |
||||
d. node mines another block [expect: inv] |
||||
|
||||
Part 2: After "sendheaders", headers announcements should generally work. |
||||
a. peer sends sendheaders [expect: no response] |
||||
peer sends getheaders with current tip [expect: no response] |
||||
b. node mines a block [expect: tip header] |
||||
c. for N in 1, ..., 10: |
||||
* for announce-type in {inv, header} |
||||
- peer mines N blocks, announces with announce-type |
||||
[ expect: getheaders/getdata or getdata, deliver block(s) ] |
||||
- node mines a block [ expect: 1 header ] |
||||
|
||||
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer. |
||||
- For response-type in {inv, getheaders} |
||||
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ] |
||||
* node mines an 8-block reorg [ expect: inv at tip ] |
||||
* peer responds with getblocks/getdata [expect: inv, blocks ] |
||||
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ] |
||||
* node mines another block at tip [ expect: inv ] |
||||
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers] |
||||
* peer requests block [ expect: block ] |
||||
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ] |
||||
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block] |
||||
* node mines 1 block [expect: 1 header, peer responds with getdata] |
||||
|
||||
Part 4: Test direct fetch behavior |
||||
a. Announce 2 old block headers. |
||||
Expect: no getdata requests. |
||||
b. Announce 3 new blocks via 1 headers message. |
||||
Expect: one getdata request for all 3 blocks. |
||||
(Send blocks.) |
||||
c. Announce 1 header that forks off the last two blocks. |
||||
Expect: no response. |
||||
d. Announce 1 more header that builds on that fork. |
||||
Expect: one getdata request for two blocks. |
||||
e. Announce 16 more headers that build on that fork. |
||||
Expect: getdata request for 14 more blocks. |
||||
f. Announce 1 more header that builds on that fork. |
||||
Expect: no response. |
||||
''' |
||||
|
||||
class BaseNode(NodeConnCB):
    """Common p2p test peer.

    Records the most recent inv/headers/block/getdata/pong message received
    and provides helpers to send requests and to poll synchronously for
    expected responses.

    Fixes over the original: comparisons against None now use the
    idiomatic ``is``/``is not`` (PEP 8) instead of ``==``/``!=``, and
    redundant bare ``return`` statements at function ends were removed.
    Behaviour is unchanged.
    """
    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.last_inv = None
        self.last_headers = None
        self.last_block = None
        self.ping_counter = 1
        self.last_pong = msg_pong(0)
        self.last_getdata = None
        self.sleep_time = 0.05          # polling interval used by sync()
        self.block_announced = False    # set by on_inv / on_headers

    def clear_last_announcement(self):
        # Reset announcement state so the next check starts fresh.
        with mininode_lock:
            self.block_announced = False
            self.last_inv = None
            self.last_headers = None

    def add_connection(self, conn):
        self.connection = conn

    # Request data for a list of block hashes
    def get_data(self, block_hashes):
        msg = msg_getdata()
        for x in block_hashes:
            msg.inv.append(CInv(2, x))   # 2 = MSG_BLOCK
        self.connection.send_message(msg)

    def get_headers(self, locator, hashstop):
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.connection.send_message(msg)

    def send_block_inv(self, blockhash):
        msg = msg_inv()
        msg.inv = [CInv(2, blockhash)]
        self.connection.send_message(msg)

    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)

    def on_inv(self, conn, message):
        self.last_inv = message
        self.block_announced = True

    def on_headers(self, conn, message):
        self.last_headers = message
        self.block_announced = True

    def on_block(self, conn, message):
        self.last_block = message.block
        self.last_block.calc_sha256()

    def on_getdata(self, conn, message):
        self.last_getdata = message

    def on_pong(self, conn, message):
        self.last_pong = message

    # Test whether the last announcement we received had the
    # right header or the right inv
    # inv and headers should be lists of block hashes
    def check_last_announcement(self, headers=None, inv=None):
        expect_headers = headers if headers is not None else []
        expect_inv = inv if inv is not None else []
        test_function = lambda: self.block_announced
        self.sync(test_function)
        with mininode_lock:
            self.block_announced = False

            success = True
            compare_inv = []
            if self.last_inv is not None:
                compare_inv = [x.hash for x in self.last_inv.inv]
            if compare_inv != expect_inv:
                success = False

            hash_headers = []
            if self.last_headers is not None:
                # treat headers as a list of block hashes
                hash_headers = [ x.sha256 for x in self.last_headers.headers ]
            if hash_headers != expect_headers:
                success = False

            self.last_inv = None
            self.last_headers = None
        return success

    # Syncing helpers
    def sync(self, test_function, timeout=60):
        """Poll test_function (under mininode_lock) until it returns True
        or `timeout` seconds have elapsed; raises AssertionError on timeout."""
        while timeout > 0:
            with mininode_lock:
                if test_function():
                    return
            time.sleep(self.sleep_time)
            timeout -= self.sleep_time
        raise AssertionError("Sync failed to complete")

    def sync_with_ping(self, timeout=60):
        # Round-trip a ping to ensure all prior messages were processed.
        self.send_message(msg_ping(nonce=self.ping_counter))
        test_function = lambda: self.last_pong.nonce == self.ping_counter
        self.sync(test_function, timeout)
        self.ping_counter += 1

    def wait_for_block(self, blockhash, timeout=60):
        test_function = lambda: self.last_block is not None and self.last_block.sha256 == blockhash
        self.sync(test_function, timeout)

    def wait_for_getdata(self, hash_list, timeout=60):
        if hash_list == []:
            return

        test_function = lambda: self.last_getdata is not None and [x.hash for x in self.last_getdata.inv] == hash_list
        self.sync(test_function, timeout)

    def send_header_for_blocks(self, new_blocks):
        headers_message = msg_headers()
        headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
        self.send_message(headers_message)

    def send_getblocks(self, locator):
        getblocks_message = msg_getblocks()
        getblocks_message.locator.vHave = locator
        self.send_message(getblocks_message)
||||
|
||||
# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
# "sendheaders" message.
class InvNode(BaseNode):
    """Control peer: never sends "sendheaders", so the node must keep
    announcing new blocks to it via inv messages."""
    def __init__(self):
        BaseNode.__init__(self)
||||
|
||||
# TestNode: This peer is the one we use for most of the testing.
class TestNode(BaseNode):
    """Primary test peer; it will send "sendheaders" during the test to
    switch the node to headers announcements."""
    def __init__(self):
        BaseNode.__init__(self)
||||
|
||||
class SendHeadersTest(BitcoinTestFramework): |
||||
    def setup_chain(self):
        # Start from a fresh, empty 2-node chain; the test mines its own blocks.
        initialize_chain_clean(self.options.tmpdir, 2)
||||
|
||||
def setup_network(self): |
||||
self.nodes = [] |
||||
self.nodes = start_nodes(2, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2) |
||||
connect_nodes(self.nodes[0], 1) |
||||
|
||||
# mine count blocks and return the new tip |
||||
def mine_blocks(self, count): |
||||
# Clear out last block announcement from each p2p listener |
||||
[ x.clear_last_announcement() for x in self.p2p_connections ] |
||||
self.nodes[0].generate(count) |
||||
return int(self.nodes[0].getbestblockhash(), 16) |
||||
|
||||
# mine a reorg that invalidates length blocks (replacing them with |
||||
# length+1 blocks). |
||||
# Note: we clear the state of our p2p connections after the |
||||
# to-be-reorged-out blocks are mined, so that we don't break later tests. |
||||
# return the list of block hashes newly mined |
||||
def mine_reorg(self, length): |
||||
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's |
||||
sync_blocks(self.nodes, wait=0.1) |
||||
[x.clear_last_announcement() for x in self.p2p_connections] |
||||
|
||||
tip_height = self.nodes[1].getblockcount() |
||||
hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1)) |
||||
self.nodes[1].invalidateblock(hash_to_invalidate) |
||||
all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain |
||||
sync_blocks(self.nodes, wait=0.1) |
||||
return [int(x, 16) for x in all_hashes] |
||||
|
||||
def run_test(self): |
||||
# Setup the p2p connections and start up the network thread. |
||||
inv_node = InvNode() |
||||
test_node = TestNode() |
||||
|
||||
self.p2p_connections = [inv_node, test_node] |
||||
|
||||
connections = [] |
||||
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node)) |
||||
# Set nServices to 0 for test_node, so no block download will occur outside of |
||||
# direct fetching |
||||
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0)) |
||||
inv_node.add_connection(connections[0]) |
||||
test_node.add_connection(connections[1]) |
||||
|
||||
NetworkThread().start() # Start up network handling in another thread |
||||
|
||||
# Test logic begins here |
||||
inv_node.wait_for_verack() |
||||
test_node.wait_for_verack() |
||||
|
||||
tip = int(self.nodes[0].getbestblockhash(), 16) |
||||
|
||||
# PART 1 |
||||
# 1. Mine a block; expect inv announcements each time |
||||
print "Part 1: headers don't start before sendheaders message..." |
||||
for i in xrange(4): |
||||
old_tip = tip |
||||
tip = self.mine_blocks(1) |
||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) |
||||
assert_equal(test_node.check_last_announcement(inv=[tip]), True) |
||||
# Try a few different responses; none should affect next announcement |
||||
if i == 0: |
||||
# first request the block |
||||
test_node.get_data([tip]) |
||||
test_node.wait_for_block(tip, timeout=5) |
||||
elif i == 1: |
||||
# next try requesting header and block |
||||
test_node.get_headers(locator=[old_tip], hashstop=tip) |
||||
test_node.get_data([tip]) |
||||
test_node.wait_for_block(tip) |
||||
test_node.clear_last_announcement() # since we requested headers... |
||||
elif i == 2: |
||||
# this time announce own block via headers |
||||
height = self.nodes[0].getblockcount() |
||||
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] |
||||
block_time = last_time + 1 |
||||
new_block = create_block(tip, create_coinbase(height+1), block_time) |
||||
new_block.solve() |
||||
test_node.send_header_for_blocks([new_block]) |
||||
test_node.wait_for_getdata([new_block.sha256], timeout=5) |
||||
test_node.send_message(msg_block(new_block)) |
||||
test_node.sync_with_ping() # make sure this block is processed |
||||
inv_node.clear_last_announcement() |
||||
test_node.clear_last_announcement() |
||||
|
||||
print "Part 1: success!" |
||||
print "Part 2: announce blocks with headers after sendheaders message..." |
||||
# PART 2 |
||||
# 2. Send a sendheaders message and test that headers announcements |
||||
# commence and keep working. |
||||
test_node.send_message(msg_sendheaders()) |
||||
prev_tip = int(self.nodes[0].getbestblockhash(), 16) |
||||
test_node.get_headers(locator=[prev_tip], hashstop=0L) |
||||
test_node.sync_with_ping() |
||||
|
||||
# Now that we've synced headers, headers announcements should work |
||||
tip = self.mine_blocks(1) |
||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) |
||||
assert_equal(test_node.check_last_announcement(headers=[tip]), True) |
||||
|
||||
height = self.nodes[0].getblockcount()+1 |
||||
block_time += 10 # Advance far enough ahead |
||||
for i in xrange(10): |
||||
# Mine i blocks, and alternate announcing either via |
||||
# inv (of tip) or via headers. After each, new blocks |
||||
# mined by the node should successfully be announced |
||||
# with block header, even though the blocks are never requested |
||||
for j in xrange(2): |
||||
blocks = [] |
||||
for b in xrange(i+1): |
||||
blocks.append(create_block(tip, create_coinbase(height), block_time)) |
||||
blocks[-1].solve() |
||||
tip = blocks[-1].sha256 |
||||
block_time += 1 |
||||
height += 1 |
||||
if j == 0: |
||||
# Announce via inv |
||||
test_node.send_block_inv(tip) |
||||
test_node.wait_for_getdata([tip], timeout=5) |
||||
# Test that duplicate inv's won't result in duplicate |
||||
# getdata requests, or duplicate headers announcements |
||||
inv_node.send_block_inv(tip) |
||||
# Should have received a getheaders as well! |
||||
test_node.send_header_for_blocks(blocks) |
||||
test_node.wait_for_getdata([x.sha256 for x in blocks[0:-1]], timeout=5) |
||||
[ inv_node.send_block_inv(x.sha256) for x in blocks[0:-1] ] |
||||
inv_node.sync_with_ping() |
||||
else: |
||||
# Announce via headers |
||||
test_node.send_header_for_blocks(blocks) |
||||
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5) |
||||
# Test that duplicate headers won't result in duplicate |
||||
# getdata requests (the check is further down) |
||||
inv_node.send_header_for_blocks(blocks) |
||||
inv_node.sync_with_ping() |
||||
[ test_node.send_message(msg_block(x)) for x in blocks ] |
||||
test_node.sync_with_ping() |
||||
inv_node.sync_with_ping() |
||||
# This block should not be announced to the inv node (since it also |
||||
# broadcast it) |
||||
assert_equal(inv_node.last_inv, None) |
||||
assert_equal(inv_node.last_headers, None) |
||||
tip = self.mine_blocks(1) |
||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) |
||||
assert_equal(test_node.check_last_announcement(headers=[tip]), True) |
||||
height += 1 |
||||
block_time += 1 |
||||
|
||||
print "Part 2: success!" |
||||
|
||||
print "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer..." |
||||
|
||||
# PART 3. Headers announcements can stop after large reorg, and resume after |
||||
# getheaders or inv from peer. |
||||
for j in xrange(2): |
||||
# First try mining a reorg that can propagate with header announcement |
||||
new_block_hashes = self.mine_reorg(length=7) |
||||
tip = new_block_hashes[-1] |
||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) |
||||
assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True) |
||||
|
||||
block_time += 8 |
||||
|
||||
# Mine a too-large reorg, which should be announced with a single inv |
||||
new_block_hashes = self.mine_reorg(length=8) |
||||
tip = new_block_hashes[-1] |
||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) |
||||
assert_equal(test_node.check_last_announcement(inv=[tip]), True) |
||||
|
||||
block_time += 9 |
||||
|
||||
fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"] |
||||
fork_point = int(fork_point, 16) |
||||
|
||||
# Use getblocks/getdata |
||||
test_node.send_getblocks(locator = [fork_point]) |
||||
assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True) |
||||
test_node.get_data(new_block_hashes) |
||||
test_node.wait_for_block(new_block_hashes[-1]) |
||||
|
||||
for i in xrange(3): |
||||
# Mine another block, still should get only an inv |
||||
tip = self.mine_blocks(1) |
||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) |
||||
assert_equal(test_node.check_last_announcement(inv=[tip]), True) |
||||
if i == 0: |
||||
# Just get the data -- shouldn't cause headers announcements to resume |
||||
test_node.get_data([tip]) |
||||
test_node.wait_for_block(tip) |
||||
elif i == 1: |
||||
# Send a getheaders message that shouldn't trigger headers announcements |
||||
# to resume (best header sent will be too old) |
||||
test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1]) |
||||
test_node.get_data([tip]) |
||||
test_node.wait_for_block(tip) |
||||
elif i == 2: |
||||
test_node.get_data([tip]) |
||||
test_node.wait_for_block(tip) |
||||
# This time, try sending either a getheaders to trigger resumption |
||||
# of headers announcements, or mine a new block and inv it, also |
||||
# triggering resumption of headers announcements. |
||||
if j == 0: |
||||
test_node.get_headers(locator=[tip], hashstop=0L) |
||||
test_node.sync_with_ping() |
||||
else: |
||||
test_node.send_block_inv(tip) |
||||
test_node.sync_with_ping() |
||||
# New blocks should now be announced with header |
||||
tip = self.mine_blocks(1) |
||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) |
||||
assert_equal(test_node.check_last_announcement(headers=[tip]), True) |
||||
|
||||
print "Part 3: success!" |
||||
|
||||
print "Part 4: Testing direct fetch behavior..." |
||||
tip = self.mine_blocks(1) |
||||
height = self.nodes[0].getblockcount() + 1 |
||||
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] |
||||
block_time = last_time + 1 |
||||
|
||||
# Create 2 blocks. Send the blocks, then send the headers. |
||||
blocks = [] |
||||
for b in xrange(2): |
||||
blocks.append(create_block(tip, create_coinbase(height), block_time)) |
||||
blocks[-1].solve() |
||||
tip = blocks[-1].sha256 |
||||
block_time += 1 |
||||
height += 1 |
||||
inv_node.send_message(msg_block(blocks[-1])) |
||||
|
||||
inv_node.sync_with_ping() # Make sure blocks are processed |
||||
test_node.last_getdata = None |
||||
test_node.send_header_for_blocks(blocks) |
||||
test_node.sync_with_ping() |
||||
# should not have received any getdata messages |
||||
with mininode_lock: |
||||
assert_equal(test_node.last_getdata, None) |
||||
|
||||
# This time, direct fetch should work |
||||
blocks = [] |
||||
for b in xrange(3): |
||||
blocks.append(create_block(tip, create_coinbase(height), block_time)) |
||||
blocks[-1].solve() |
||||
tip = blocks[-1].sha256 |
||||
block_time += 1 |
||||
height += 1 |
||||
|
||||
test_node.send_header_for_blocks(blocks) |
||||
test_node.sync_with_ping() |
||||
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=test_node.sleep_time) |
||||
|
||||
[ test_node.send_message(msg_block(x)) for x in blocks ] |
||||
|
||||
test_node.sync_with_ping() |
||||
|
||||
# Now announce a header that forks the last two blocks |
||||
tip = blocks[0].sha256 |
||||
height -= 1 |
||||
blocks = [] |
||||
|
||||
# Create extra blocks for later |
||||
for b in xrange(20): |
||||
blocks.append(create_block(tip, create_coinbase(height), block_time)) |
||||
blocks[-1].solve() |
||||
tip = blocks[-1].sha256 |
||||
block_time += 1 |
||||
height += 1 |
||||
|
||||
# Announcing one block on fork should not trigger direct fetch |
||||
# (less work than tip) |
||||
test_node.last_getdata = None |
||||
test_node.send_header_for_blocks(blocks[0:1]) |
||||
test_node.sync_with_ping() |
||||
with mininode_lock: |
||||
assert_equal(test_node.last_getdata, None) |
||||
|
||||
# Announcing one more block on fork should trigger direct fetch for |
||||
# both blocks (same work as tip) |
||||
test_node.send_header_for_blocks(blocks[1:2]) |
||||
test_node.sync_with_ping() |
||||
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=test_node.sleep_time) |
||||
|
||||
# Announcing 16 more headers should trigger direct fetch for 14 more |
||||
# blocks |
||||
test_node.send_header_for_blocks(blocks[2:18]) |
||||
test_node.sync_with_ping() |
||||
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=test_node.sleep_time) |
||||
|
||||
# Announcing 1 more header should not trigger any response |
||||
test_node.last_getdata = None |
||||
test_node.send_header_for_blocks(blocks[18:19]) |
||||
test_node.sync_with_ping() |
||||
with mininode_lock: |
||||
assert_equal(test_node.last_getdata, None) |
||||
|
||||
print "Part 4: success!" |
||||
|
||||
# Finally, check that the inv node never received a getdata request, |
||||
# throughout the test |
||||
assert_equal(inv_node.last_getdata, None) |
||||
|
||||
# Script entry point: run the test when invoked directly.
if __name__ == '__main__':
    SendHeadersTest().main()
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue