Mirror of https://github.com/kvazar-network/kevacoin.git, synced 2025-01-25 22:34:27 +00:00
rpc: keep track of acceptors, and cancel them in StopRPCThreads
Fixes #4156. The problem is that the boost::asio::io_service destructor waits for the acceptors to finish (on Windows, with Boost 1.55). Fix this by keeping track of the acceptors and cancelling them before stopping the event loops.

Rebased-By: Wladimir J. van der Laan <laanwj@gmail.com>
Rebased-From: cef4494
Parent: 8b1a93f2e5
Commit: 9f535d4104
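Before the diff, a condensed sketch of the shutdown ordering this patch establishes: keep every listening acceptor in a container, cancel them all, then stop the io_service and join the worker threads. This is a hypothetical standalone example, not the kevacoin sources; it assumes the legacy Boost.Asio io_service API used in the diff, and the names g_io_service, g_acceptors, g_workers and StopSketch are illustrative only.

    // Hypothetical sketch of the teardown order introduced by this commit.
    // Assumes Boost.Asio's legacy io_service API; all names here are
    // illustrative, not taken from the real kevacoin code.
    #include <vector>
    #include <boost/asio.hpp>
    #include <boost/foreach.hpp>
    #include <boost/shared_ptr.hpp>
    #include <boost/thread.hpp>

    static boost::asio::io_service* g_io_service = NULL;
    static std::vector< boost::shared_ptr<boost::asio::ip::tcp::acceptor> > g_acceptors;
    static boost::thread_group* g_workers = NULL;

    void StopSketch()
    {
        if (g_io_service == NULL) return;

        // Cancel outstanding async accept operations first: io_service::stop()
        // does not do this, and the io_service destructor can otherwise wait on
        // the acceptors (observed on Windows with Boost 1.55, per the commit
        // message).
        BOOST_FOREACH(const boost::shared_ptr<boost::asio::ip::tcp::acceptor> &acceptor, g_acceptors)
            acceptor->cancel();
        g_acceptors.clear();

        g_io_service->stop();       // make run() return in the worker threads
        if (g_workers != NULL)
            g_workers->join_all();  // safe to join: nothing blocks on accept anymore

        delete g_workers;    g_workers = NULL;
        delete g_io_service; g_io_service = NULL;
    }

The change below applies the same ordering inside StartRPCThreads and StopRPCThreads via a static rpc_acceptors vector.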
@@ -38,6 +38,7 @@ static map<string, boost::shared_ptr<deadline_timer> > deadlineTimers;
 static ssl::context* rpc_ssl_context = NULL;
 static boost::thread_group* rpc_worker_group = NULL;
 static boost::asio::io_service::work *rpc_dummy_work = NULL;
+static std::vector< boost::shared_ptr<ip::tcp::acceptor> > rpc_acceptors;
 
 void RPCTypeCheck(const Array& params,
                   const list<Value_type>& typesExpected,
@@ -559,12 +560,13 @@ void StartRPCThreads()
     asio::ip::address bindAddress = loopback ? asio::ip::address_v6::loopback() : asio::ip::address_v6::any();
     ip::tcp::endpoint endpoint(bindAddress, GetArg("-rpcport", Params().RPCPort()));
     boost::system::error_code v6_only_error;
-    boost::shared_ptr<ip::tcp::acceptor> acceptor(new ip::tcp::acceptor(*rpc_io_service));
 
     bool fListening = false;
     std::string strerr;
     try
     {
+        boost::shared_ptr<ip::tcp::acceptor> acceptor(new ip::tcp::acceptor(*rpc_io_service));
+        rpc_acceptors.push_back(acceptor);
         acceptor->open(endpoint.protocol());
         acceptor->set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
 
@@ -582,7 +584,6 @@ void StartRPCThreads()
     {
         strerr = strprintf(_("An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s"), endpoint.port(), e.what());
     }
-
     try {
         // If dual IPv6/IPv4 failed (or we're opening loopback interfaces only), open IPv4 separately
         if (!fListening || loopback || v6_only_error)
@@ -590,7 +591,8 @@ void StartRPCThreads()
             bindAddress = loopback ? asio::ip::address_v4::loopback() : asio::ip::address_v4::any();
             endpoint.address(bindAddress);
 
-            acceptor.reset(new ip::tcp::acceptor(*rpc_io_service));
+            boost::shared_ptr<ip::tcp::acceptor> acceptor(new ip::tcp::acceptor(*rpc_io_service));
+            rpc_acceptors.push_back(acceptor);
             acceptor->open(endpoint.protocol());
             acceptor->set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
             acceptor->bind(endpoint);
@@ -634,7 +636,16 @@ void StopRPCThreads()
 {
     if (rpc_io_service == NULL) return;
 
+    // First, cancel all timers and acceptors
+    // This is not done automatically by ->stop(), and in some cases the destructor of
+    // asio::io_service can hang if this is skipped.
+    BOOST_FOREACH(const boost::shared_ptr<ip::tcp::acceptor> &acceptor, rpc_acceptors)
+        acceptor->cancel();
+    rpc_acceptors.clear();
+    BOOST_FOREACH(const PAIRTYPE(std::string, boost::shared_ptr<deadline_timer>) &timer, deadlineTimers)
+        timer.second->cancel();
     deadlineTimers.clear();
+
     rpc_io_service->stop();
     if (rpc_worker_group != NULL)
         rpc_worker_group->join_all();