TCPServer: removed unnecessary mutex

pull/238/head
SChernykh 2023-02-27 15:38:30 +01:00
parent 7a1afc7a95
commit 12a011a9ff
5 changed files with 172 additions and 185 deletions
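The change applies one pattern across all five files: every access to the connected-clients list already happens on the libuv event loop thread, so the m_clientsListLock mutex is removed and replaced with a check_event_loop_thread(__func__) assertion that logs an error if that invariant is ever broken. A minimal standalone sketch of the pattern follows; the names check_event_loop_thread and server_event_loop_thread mirror the diff, while the Server struct, main() and the fprintf logging are illustrative stand-ins for p2pool's TCPServer and its LOGERR macro.

#include <uv.h>
#include <cstdio>

// True only in the thread that runs uv_run(); every other thread sees the
// default "false" in its own thread-local copy.
static thread_local bool server_event_loop_thread = false;

static void check_event_loop_thread(const char* func)
{
	// Log instead of aborting - same behavior as the LOGERR(1, ...) call in the diff.
	if (!server_event_loop_thread) {
		fprintf(stderr, "%s called from another thread, this is not thread safe\n", func);
	}
}

struct Server
{
	uv_loop_t loop;
	uv_async_t wakeup;

	// Work on the client list is funneled onto the loop thread via uv_async_send(),
	// so it needs no mutex - only the assertion.
	void on_wakeup()
	{
		check_event_loop_thread(__func__); // passes: we are on the loop thread
		// ... walk the connected-clients list here without locking ...
	}

	static void loop_thread(void* arg)
	{
		Server* self = static_cast<Server*>(arg);
		server_event_loop_thread = true; // mark this thread as the event loop thread
		uv_run(&self->loop, UV_RUN_DEFAULT);
	}
};

int main()
{
	Server s;
	uv_loop_init(&s.loop);
	uv_async_init(&s.loop, &s.wakeup, [](uv_async_t* h) {
		static_cast<Server*>(h->data)->on_wakeup();
		uv_close(reinterpret_cast<uv_handle_t*>(h), nullptr); // last handle closed -> loop exits
	});
	s.wakeup.data = &s;

	uv_thread_t t;
	uv_thread_create(&t, Server::loop_thread, &s);

	uv_async_send(&s.wakeup);        // handled on the loop thread: no error logged
	check_event_loop_thread("main"); // called from the main thread: logs an error

	uv_thread_join(&t);
	uv_loop_close(&s.loop);
	return 0;
}

What used to be a locking discipline becomes a checked invariant: the hot paths lose the lock/unlock overhead, and an accidental cross-thread call shows up in the log instead of being papered over by a lock.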

View File

@@ -261,6 +261,8 @@ void P2PServer::on_connect_failed(bool is_v6, const raw_ip& ip, int port)
void P2PServer::update_peer_connections()
{
check_event_loop_thread(__func__);
const uint64_t cur_time = seconds_since_epoch();
const uint64_t last_updated = m_pool->side_chain().last_updated();
@@ -268,8 +270,6 @@ void P2PServer::update_peer_connections()
m_fastestPeer = nullptr;
unordered_set<raw_ip> connected_clients;
{
MutexLock lock(m_clientsListLock);
connected_clients.reserve(m_numConnections);
for (P2PClient* client = static_cast<P2PClient*>(m_connectedClientsList->m_next); client != m_connectedClientsList; client = static_cast<P2PClient*>(client->m_next)) {
@@ -304,7 +304,6 @@ void P2PServer::update_peer_connections()
}
}
}
}
std::vector<Peer> peer_list;
{
@@ -363,7 +362,7 @@ void P2PServer::update_peer_connections()
void P2PServer::update_peer_list()
{
MutexLock lock(m_clientsListLock);
check_event_loop_thread(__func__);
const uint64_t cur_time = seconds_since_epoch();
for (P2PClient* client = static_cast<P2PClient*>(m_connectedClientsList->m_next); client != m_connectedClientsList; client = static_cast<P2PClient*>(client->m_next)) {
@@ -843,6 +842,8 @@ void P2PServer::broadcast(const PoolBlock& block, const PoolBlock* parent)
void P2PServer::on_broadcast()
{
check_event_loop_thread(__func__);
std::vector<Broadcast*> broadcast_queue;
broadcast_queue.reserve(2);
@@ -863,8 +864,6 @@ void P2PServer::on_broadcast()
}
});
MutexLock lock(m_clientsListLock);
for (P2PClient* client = static_cast<P2PClient*>(m_connectedClientsList->m_next); client != m_connectedClientsList; client = static_cast<P2PClient*>(client->m_next)) {
if (!client->is_good()) {
continue;
@@ -941,9 +940,7 @@ void P2PServer::on_broadcast()
uint64_t P2PServer::get_random64()
{
if (!server_event_loop_thread) {
LOGERR(1, "get_random64() was called from another thread, this is not thread safe");
}
check_event_loop_thread(__func__);
return m_rng();
}
@@ -965,9 +962,9 @@ void P2PServer::show_peers_async()
}
}
void P2PServer::show_peers()
void P2PServer::show_peers() const
{
MutexLock lock(m_clientsListLock);
check_event_loop_thread(__func__);
const uint64_t cur_time = seconds_since_epoch();
size_t n = 0;
@@ -1070,6 +1067,8 @@ void P2PServer::flush_cache()
void P2PServer::download_missing_blocks()
{
check_event_loop_thread(__func__);
if (!m_lookForMissingBlocks) {
return;
}
@@ -1083,8 +1082,6 @@ void P2PServer::download_missing_blocks()
return;
}
MutexLock lock(m_clientsListLock);
if (m_numConnections == 0) {
return;
}
@@ -1271,7 +1268,6 @@ bool P2PServer::P2PClient::on_connect()
}
// Don't allow multiple connections to/from the same IP (except localhost)
// server->m_clientsListLock is already locked here
if (!m_addr.is_localhost()) {
for (P2PClient* client = static_cast<P2PClient*>(server->m_connectedClientsList->m_next); client != server->m_connectedClientsList; client = static_cast<P2PClient*>(client->m_next)) {
if ((client != this) && (client->m_addr == m_addr)) {
@@ -1757,6 +1753,8 @@ bool P2PServer::P2PClient::check_handshake_solution(const hash& solution, const
bool P2PServer::P2PClient::on_handshake_challenge(const uint8_t* buf)
{
check_event_loop_thread(__func__);
P2PServer* server = static_cast<P2PServer*>(m_owner);
uint8_t challenge[CHALLENGE_SIZE];
@@ -1772,22 +1770,13 @@ bool P2PServer::P2PClient::on_handshake_challenge(const uint8_t* buf)
m_peerId = peer_id;
bool same_peer = false;
{
MutexLock lock(server->m_clientsListLock);
for (const P2PClient* client = static_cast<P2PClient*>(server->m_connectedClientsList->m_next); client != server->m_connectedClientsList; client = static_cast<P2PClient*>(client->m_next)) {
if ((client != this) && (client->m_peerId == peer_id)) {
LOGWARN(5, "tried to connect to the same peer twice: current connection " << static_cast<const char*>(client->m_addrString) << ", new connection " << static_cast<const char*>(m_addrString));
same_peer = true;
break;
}
}
}
if (same_peer) {
close();
return true;
}
}
send_handshake_solution(challenge);
return true;
@@ -2033,6 +2022,8 @@ bool P2PServer::P2PClient::on_block_broadcast(const uint8_t* buf, uint32_t size,
bool P2PServer::P2PClient::on_peer_list_request(const uint8_t*)
{
check_event_loop_thread(__func__);
P2PServer* server = static_cast<P2PServer*>(m_owner);
const uint64_t cur_time = seconds_since_epoch();
const bool first = (m_prevIncomingPeerListRequest == 0);
@@ -2050,8 +2041,6 @@ bool P2PServer::P2PClient::on_peer_list_request(const uint8_t*)
Peer peers[PEER_LIST_RESPONSE_MAX_PEERS];
uint32_t num_selected_peers = 0;
{
MutexLock lock(server->m_clientsListLock);
// Send every 4th peer on average, selected at random
const uint32_t peers_to_send_target = std::min<uint32_t>(PEER_LIST_RESPONSE_MAX_PEERS, std::max<uint32_t>(1, server->m_numConnections / 4));
@@ -2078,7 +2067,6 @@ bool P2PServer::P2PClient::on_peer_list_request(const uint8_t*)
peers[k] = p;
}
}
}
// Protocol version message:
// - IPv4 address = 255.255.255.255

View File

@@ -253,7 +253,7 @@ private:
uv_async_t m_showPeersAsync;
static void on_show_peers(uv_async_t* handle) { reinterpret_cast<P2PServer*>(handle->data)->show_peers(); }
void show_peers();
void show_peers() const;
void on_shutdown() override;
};

View File

@@ -484,11 +484,11 @@ void StratumServer::show_workers_async()
void StratumServer::show_workers()
{
check_event_loop_thread(__func__);
const uint64_t cur_time = seconds_since_epoch();
const difficulty_type pool_diff = m_pool->side_chain().difficulty();
MutexLock lock(m_clientsListLock);
int addr_len = 0;
for (const StratumClient* c = static_cast<StratumClient*>(m_connectedClientsList->m_next); c != m_connectedClientsList; c = static_cast<StratumClient*>(c->m_next)) {
addr_len = std::max(addr_len, static_cast<int>(strlen(c->m_addrString)));
@@ -671,6 +671,8 @@ void StratumServer::update_auto_diff(StratumClient* client, const uint64_t times
void StratumServer::on_blobs_ready()
{
check_event_loop_thread(__func__);
std::vector<BlobsData*> blobs_queue;
blobs_queue.reserve(2);
@@ -699,8 +701,6 @@ void StratumServer::on_blobs_ready()
uint32_t num_sent = 0;
const uint64_t cur_time = seconds_since_epoch();
{
MutexLock lock2(m_clientsListLock);
for (StratumClient* client = static_cast<StratumClient*>(m_connectedClientsList->m_prev); client != m_connectedClientsList; client = static_cast<StratumClient*>(client->m_prev)) {
++numClientsProcessed;
@@ -794,7 +794,6 @@ void StratumServer::on_blobs_ready()
if (numClientsProcessed != num_connections) {
LOGWARN(1, "client list is broken, expected " << num_connections << ", got " << numClientsProcessed << " clients");
}
}
LOGINFO(3, "sent new job to " << num_sent << '/' << numClientsProcessed << " clients");
}

View File

@@ -170,7 +170,8 @@ protected:
uv_loop_t m_loop;
uv_mutex_t m_clientsListLock;
static void check_event_loop_thread(const char *func);
std::vector<Client*> m_preallocatedClients;
Client* get_client();

View File

@@ -61,7 +61,6 @@ TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::TCPServer(allocate_client_callback all
}
m_shutdownAsync.data = this;
uv_mutex_init_checked(&m_clientsListLock);
uv_mutex_init_checked(&m_bansLock);
m_connectedClientsList = m_allocateNewClient();
@@ -369,11 +368,17 @@ bool TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::connect_to_peer(Client* client)
}
template<size_t READ_BUF_SIZE, size_t WRITE_BUF_SIZE>
void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::close_sockets(bool listen_sockets)
void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::check_event_loop_thread(const char* func)
{
if (!server_event_loop_thread) {
LOGERR(1, "closing sockets from another thread, this is not thread safe");
LOGERR(1, func << " called from another thread, this is not thread safe");
}
}
template<size_t READ_BUF_SIZE, size_t WRITE_BUF_SIZE>
void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::close_sockets(bool listen_sockets)
{
check_event_loop_thread(__func__);
if (listen_sockets) {
for (uv_tcp_t* s : m_listenSockets6) {
@@ -391,8 +396,6 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::close_sockets(bool listen_sockets
}
size_t numClosed = 0;
{
MutexLock lock(m_clientsListLock);
for (Client* c = m_connectedClientsList->m_next; c != m_connectedClientsList; c = c->m_next) {
uv_handle_t* h = reinterpret_cast<uv_handle_t*>(&c->m_socket);
@@ -401,7 +404,6 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::close_sockets(bool listen_sockets
++numClosed;
}
}
}
if (numClosed > 0) {
LOGWARN(1, "closed " << numClosed << " active client connections");
@@ -418,7 +420,6 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::shutdown_tcp()
uv_async_send(&m_shutdownAsync);
uv_thread_join(&m_loopThread);
uv_mutex_destroy(&m_clientsListLock);
uv_mutex_destroy(&m_bansLock);
LOGINFO(1, "stopped");
@@ -464,9 +465,7 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::print_bans()
template<size_t READ_BUF_SIZE, size_t WRITE_BUF_SIZE>
bool TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::send_internal(Client* client, SendCallbackBase&& callback)
{
if (!server_event_loop_thread) {
LOGERR(1, "sending data from another thread, this is not thread safe");
}
check_event_loop_thread(__func__);
if (client->m_isClosing) {
LOGWARN(5, "client " << static_cast<const char*>(client->m_addrString) << " is being disconnected, can't send any more data");
@@ -586,14 +585,14 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::on_new_connection(uv_stream_t* se
template<size_t READ_BUF_SIZE, size_t WRITE_BUF_SIZE>
void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::on_connection_close(uv_handle_t* handle)
{
check_event_loop_thread(__func__);
Client* client = static_cast<Client*>(handle->data);
TCPServer* owner = client->m_owner;
LOGINFO(5, "peer " << log::Gray() << static_cast<char*>(client->m_addrString) << log::NoColor() << " disconnected");
if (owner) {
MutexLock lock(owner->m_clientsListLock);
Client* prev_in_list = client->m_prev;
Client* next_in_list = client->m_next;
@@ -688,7 +687,7 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::on_new_client(uv_stream_t* server
template<size_t READ_BUF_SIZE, size_t WRITE_BUF_SIZE>
void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::on_new_client(uv_stream_t* server, Client* client)
{
MutexLock lock(m_clientsListLock);
check_event_loop_thread(__func__);
client->m_prev = m_connectedClientsList;
client->m_next = m_connectedClientsList->m_next;