// Copyright (c) 2012-2013 The Bitcoin Core developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "serialize.h"
#include <stdint.h>
#include <boost/test/unit_test.hpp>
using namespace std;
BOOST_AUTO_TEST_SUITE(serialize_tests)
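
// For reference, the VARINT format exercised below (documented in
// serialize.h): each byte holds seven bits of the number, the high bit
// marks that more bytes follow, and all bytes except the last are offset
// by one so that every value has exactly one encoding. For example:
// 0 -> [0x00], 127 -> [0x7F], 128 -> [0x80 0x00], 255 -> [0x80 0x7F],
// 16384 -> [0xFF 0x00].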
BOOST_AUTO_TEST_CASE(varints)
{
    // encode
    CDataStream ss(SER_DISK, 0);
    CDataStream::size_type size = 0;
    for (int i = 0; i < 100000; i++) {
        ss << VARINT(i);
        size += ::GetSerializeSize(VARINT(i), 0, 0);
        BOOST_CHECK(size == ss.size());
    }
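
    // A second pass walks the 64-bit range with a large prime stride
    // (999999937) so the loop exercises encodings of varying byte length.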
    for (uint64_t i = 0; i < 100000000000ULL; i += 999999937) {
        ss << VARINT(i);
        size += ::GetSerializeSize(VARINT(i), 0, 0);
        BOOST_CHECK(size == ss.size());
    }

    // decode
    for (int i = 0; i < 100000; i++) {
        int j = -1;
        ss >> VARINT(j);
        BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i);
    }

    for (uint64_t i = 0; i < 100000000000ULL; i += 999999937) {
        uint64_t j = -1;
        ss >> VARINT(j);
        BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i);
    }
}
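
// For reference, the CompactSize format exercised below (see
// WriteCompactSize/ReadCompactSize in serialize.h):
//   n < 253         -> one byte holding n itself
//   n <= 0xFFFF     -> 0xFD followed by n as a little-endian uint16
//   n <= 0xFFFFFFFF -> 0xFE followed by n as a little-endian uint32
//   larger          -> 0xFF followed by n as a little-endian uint64
// The canonical encoding of n is the shortest form that can hold it.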
BOOST_AUTO_TEST_CASE(compactsize)
{
    CDataStream ss(SER_DISK, 0);
    vector<char>::size_type i, j;

    for (i = 1; i <= MAX_SIZE; i *= 2)
    {
        WriteCompactSize(ss, i-1);
        WriteCompactSize(ss, i);
    }
    for (i = 1; i <= MAX_SIZE; i *= 2)
    {
        j = ReadCompactSize(ss);
        BOOST_CHECK_MESSAGE((i-1) == j, "decoded:" << j << " expected:" << (i-1));
        j = ReadCompactSize(ss);
        BOOST_CHECK_MESSAGE(i == j, "decoded:" << j << " expected:" << i);
    }
}
static bool isCanonicalException(const std::ios_base::failure& ex)
{
    std::ios_base::failure expectedException("non-canonical ReadCompactSize()");

    // The string returned by what() can be different for different platforms.
    // Instead of directly comparing the ex.what() with an expected string,
    // create an instance of exception to see if ex.what() matches
    // the expected explanatory string returned by the exception instance.
    return strcmp(expectedException.what(), ex.what()) == 0;
}
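
// isCanonicalException is passed below as the third argument of
// BOOST_CHECK_EXCEPTION, which hands the caught exception to this
// predicate and fails the check unless the predicate returns true.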
BOOST_AUTO_TEST_CASE(noncanonical)
{
    // Write some non-canonical CompactSize encodings, and
    // make sure an exception is thrown when read back.
    CDataStream ss(SER_DISK, 0);
    vector<char>::size_type n;

    // zero encoded with three bytes:
    ss.write("\xfd\x00\x00", 3);
    BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);

    // 0xfc encoded with three bytes:
    ss.write("\xfd\xfc\x00", 3);
    BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);

    // 0xfd encoded with three bytes is OK:
    ss.write("\xfd\xfd\x00", 3);
    n = ReadCompactSize(ss);
    BOOST_CHECK(n == 0xfd);

    // zero encoded with five bytes:
    ss.write("\xfe\x00\x00\x00\x00", 5);
    BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);

    // 0xffff encoded with five bytes:
    ss.write("\xfe\xff\xff\x00\x00", 5);
    BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);

    // zero encoded with nine bytes:
    ss.write("\xff\x00\x00\x00\x00\x00\x00\x00\x00", 9);
    BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);

    // 0x01ffffff encoded with nine bytes:
    ss.write("\xff\xff\xff\xff\x01\x00\x00\x00\x00", 9);
    BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
}
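
// CDataStream also exposes a vector-like interface (operator[], insert,
// erase) alongside its stream operators; the next case exercises that
// interface directly.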
BOOST_AUTO_TEST_CASE(insert_delete)
{
    // Test inserting/deleting bytes.
    CDataStream ss(SER_DISK, 0);
    BOOST_CHECK_EQUAL(ss.size(), 0);

    ss.write("\x00\x01\x02\xff", 4);
    BOOST_CHECK_EQUAL(ss.size(), 4);

    char c = (char)11;

    // Inserting at beginning/end/middle:
    ss.insert(ss.begin(), c);
    BOOST_CHECK_EQUAL(ss.size(), 5);
    BOOST_CHECK_EQUAL(ss[0], c);
    BOOST_CHECK_EQUAL(ss[1], 0);

    ss.insert(ss.end(), c);
    BOOST_CHECK_EQUAL(ss.size(), 6);
    BOOST_CHECK_EQUAL(ss[4], (char)0xff);
    BOOST_CHECK_EQUAL(ss[5], c);

    ss.insert(ss.begin()+2, c);
    BOOST_CHECK_EQUAL(ss.size(), 7);
    BOOST_CHECK_EQUAL(ss[2], c);

    // Delete at beginning/end/middle
    ss.erase(ss.begin());
    BOOST_CHECK_EQUAL(ss.size(), 6);
    BOOST_CHECK_EQUAL(ss[0], 0);

    ss.erase(ss.begin()+ss.size()-1);
    BOOST_CHECK_EQUAL(ss.size(), 5);
    BOOST_CHECK_EQUAL(ss[4], (char)0xff);

    ss.erase(ss.begin()+1);
    BOOST_CHECK_EQUAL(ss.size(), 4);
    BOOST_CHECK_EQUAL(ss[0], 0);
    BOOST_CHECK_EQUAL(ss[1], 1);
    BOOST_CHECK_EQUAL(ss[2], 2);
    BOOST_CHECK_EQUAL(ss[3], (char)0xff);

    // Make sure GetAndClear does the right thing:
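    // (Expected behavior, judging by the checks here: the stream's bytes
    // are moved into d and the stream is left empty.)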
    CSerializeData d;
    ss.GetAndClear(d);
    BOOST_CHECK_EQUAL(ss.size(), 0);
}
BOOST_AUTO_TEST_SUITE_END()