Multiple bugfixes

merge-mining
SChernykh 2023-12-10 15:31:40 +01:00
parent 32b502a1e3
commit 4362ee490c
10 changed files with 102 additions and 33 deletions

View File

@@ -1321,7 +1321,7 @@ bool BlockTemplate::get_aux_proof(const uint32_t template_id, const hash& h, std
}
std::vector<std::pair<bool, hash>> t;
-if (!get_merkle_proof(m_poolBlockTemplate->m_merkleTree, m_poolBlockTemplate->m_sidechainId, t)) {
+if (!get_merkle_proof(m_poolBlockTemplate->m_merkleTree, h, t)) {
return false;
}
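Note: the old code always generated the Merkle proof for the template's own m_sidechainId, ignoring the aux hash h that the caller asked about, so the returned proof did not correspond to the requested aux chain hash. For context, a proof of the shape std::vector<std::pair<bool, hash>> is normally verified by folding the leaf digest up the tree. The sketch below only illustrates that idea; the std::string digests, the combine() placeholder and the "bool means sibling-on-the-left" convention are assumptions, not p2pool's actual Merkle code.

#include <string>
#include <utility>
#include <vector>

// Placeholder for the real pair-hash (p2pool uses a cryptographic hash here).
static std::string combine(const std::string& a, const std::string& b) { return a + "|" + b; }

// proof entries are (sibling-goes-on-the-left?, sibling digest), mirroring the
// std::vector<std::pair<bool, hash>> shape in the hunk above (an assumed convention).
static bool verify_proof(std::string cur, const std::vector<std::pair<bool, std::string>>& proof, const std::string& root)
{
	for (const auto& p : proof) {
		cur = p.first ? combine(p.second, cur) : combine(cur, p.second);
	}
	return cur == root;
}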

View File

@@ -387,6 +387,10 @@ struct AuxChainData
{
FORCEINLINE AuxChainData(const hash& _id, const hash& _data, const difficulty_type& _difficulty) : unique_id(_id), data(_data), difficulty(_difficulty) {}
+FORCEINLINE bool operator==(const AuxChainData& rhs) const {
+return (unique_id == rhs.unique_id) && (data == rhs.data) && (difficulty == rhs.difficulty);
+}
hash unique_id;
hash data;
difficulty_type difficulty;
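Note: the new operator== is what lets two std::vector<AuxChainData> snapshots be compared element-wise; p2pool::update_aux_data() further down relies on that comparison to skip rebuilding the block template when nothing changed. A minimal, self-contained illustration of why a member operator== is all std::vector needs (placeholder names, not p2pool code):

#include <vector>

struct Item {
	int id;
	bool operator==(const Item& rhs) const { return id == rhs.id; }
};

int main() {
	std::vector<Item> a{{1}, {2}};
	std::vector<Item> b{{1}, {2}};
	// std::vector::operator== compares sizes first, then each element via Item::operator==
	return (a == b) ? 0 : 1;
}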

View File

@@ -198,9 +198,19 @@ void MergeMiningClient::merge_mining_get_job(uint64_t height, const hash& prev_i
JSONRPCRequest::call(m_host, m_port, std::string(buf, s.m_pos), std::string(), m_pool->params().m_socks5Proxy,
[this](const char* data, size_t size, double) {
-WriteLock lock(m_lock);
+bool changed = false;
+hash chain_id;
-parse_merge_mining_get_job(data, size);
+{
+WriteLock lock(m_lock);
+if (parse_merge_mining_get_job(data, size, changed)) {
+chain_id = m_chainID;
+}
+}
+if (changed && !chain_id.empty()) {
+m_pool->update_aux_data(chain_id);
+}
},
[this](const char* data, size_t size, double) {
if (size > 0) {
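Note: the callback now holds m_lock only while parsing and copies m_chainID into a local, then calls m_pool->update_aux_data() after the lock has been released — presumably because update_aux_data() takes p2pool's own locks, and calling it while still holding the client's lock would nest locks across objects. A generic, self-contained sketch of this "copy under lock, act outside the lock" pattern, using std::mutex instead of p2pool's WriteLock wrapper (all names below are placeholders):

#include <mutex>
#include <string>

struct Client {
	std::mutex lock;
	std::string chain_id;

	bool parse(const std::string&) { chain_id = "example"; return true; }  // hypothetical parser
	void notify_pool(const std::string&) {}                                // hypothetical callback that takes other locks

	void on_response(const std::string& data) {
		std::string id_copy;
		bool changed = false;
		{
			std::lock_guard<std::mutex> g(lock);  // hold the lock only while touching our own state
			changed = parse(data);
			id_copy = chain_id;
		}
		if (changed && !id_copy.empty()) {
			notify_pool(id_copy);                 // runs with no lock held
		}
	}
};

int main() { Client c; c.on_response("{}"); }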
@@ -210,7 +220,7 @@ void MergeMiningClient::merge_mining_get_job(uint64_t height, const hash& prev_i
}, &m_loop);
}
-bool MergeMiningClient::parse_merge_mining_get_job(const char* data, size_t size)
+bool MergeMiningClient::parse_merge_mining_get_job(const char* data, size_t size, bool& changed)
{
auto err = [](const char* msg) {
LOGWARN(1, "merge_mining_get_job RPC call failed: " << msg);
@@ -263,6 +273,8 @@ bool MergeMiningClient::parse_merge_mining_get_job(const char* data, size_t size
m_auxDiff.lo = result["aux_diff"].GetUint64();
m_auxDiff.hi = 0;
+changed = true;
return true;
}
@@ -276,7 +288,7 @@ void MergeMiningClient::merge_mining_submit_solution(const std::vector<uint8_t>&
s << "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"merge_mining_submit_solution\",\"params\":{"
<< "\"aux_blob\":\"" << log::hex_buf(m_auxBlob.data(), m_auxBlob.size()) << '"'
<< ",\"aux_hash\":\"" << m_auxHash << '"'
-<< ",\"blob\":" << log::hex_buf(blob.data(), blob.size())
+<< ",\"blob\":\"" << log::hex_buf(blob.data(), blob.size()) << '"'
<< ",\"merkle_proof\":[";
for (size_t i = 0, n = merkle_proof.size(); i < n; ++i) {
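Note: this hunk adds the missing quotes around the hex-encoded blob. Without them the request body is not valid JSON (a bare hex token is neither a number nor a string), so a strict JSON parser on the merge-mined chain's side would reject the whole merge_mining_submit_solution call. Roughly (illustrative values only):

before:  "blob":0102ab...      (invalid JSON)
after:   "blob":"0102ab..."    (a proper JSON string)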

View File

@@ -45,7 +45,7 @@ private:
bool parse_merge_mining_get_chain_id(const char* data, size_t size);
void merge_mining_get_job(uint64_t height, const hash& prev_id, const std::string& wallet, const hash& aux_hash);
-bool parse_merge_mining_get_job(const char* data, size_t size);
+bool parse_merge_mining_get_job(const char* data, size_t size, bool& changed);
bool parse_merge_mining_submit_solution(const char* data, size_t size);

View File

@@ -244,6 +244,7 @@ void Miner::run(WorkerData* data)
if (j.m_auxDiff.check_pow(h)) {
for (const AuxChainData& aux_data : j.m_auxChains) {
if (aux_data.difficulty.check_pow(h)) {
+LOGINFO(0, log::Green() << "AUX BLOCK FOUND: chain_id " << aux_data.unique_id << ", diff " << j.m_auxDiff << ", worker thread " << data->m_index << '/' << data->m_count);
m_pool->submit_aux_block(aux_data.unique_id, j.m_templateId, j.m_nonce, j.m_extraNonce);
}
}

View File

@@ -364,7 +364,7 @@ void P2PServer::update_peer_connections()
peer_list.pop_back();
}
-if (!has_good_peers && ((m_timerCounter % 10) == 0)) {
+if (!has_good_peers && ((m_timerCounter % 10) == 0) && (SideChain::network_type() == NetworkType::Mainnet)) {
LOGERR(1, "no connections to other p2pool nodes, check your monerod/p2pool/network/firewall setup!!!");
load_peer_list();
if (m_peerListMonero.empty()) {

View File

@@ -131,6 +131,7 @@ p2pool::p2pool(int argc, char* argv[])
uv_rwlock_init_checked(&m_mainchainLock);
uv_rwlock_init_checked(&m_minerDataLock);
uv_rwlock_init_checked(&m_ZMQReaderLock);
+uv_rwlock_init_checked(&m_mergeMiningClientsLock);
uv_mutex_init_checked(&m_foundBlocksLock);
#ifdef WITH_RANDOMX
uv_mutex_init_checked(&m_minerLock);
@@ -187,14 +188,19 @@ p2pool::~p2pool()
}
#endif
-for (const MergeMiningClient* c : m_mergeMiningClients) {
-delete c;
+{
+WriteLock lock(m_mergeMiningClientsLock);
+for (const MergeMiningClient* c : m_mergeMiningClients) {
+delete c;
+}
+m_mergeMiningClients.clear();
+}
-m_mergeMiningClients.clear();
uv_rwlock_destroy(&m_mainchainLock);
uv_rwlock_destroy(&m_minerDataLock);
uv_rwlock_destroy(&m_ZMQReaderLock);
+uv_rwlock_destroy(&m_mergeMiningClientsLock);
uv_mutex_destroy(&m_foundBlocksLock);
#ifdef WITH_RANDOMX
uv_mutex_destroy(&m_minerLock);
@@ -340,26 +346,7 @@ void p2pool::handle_miner_data(MinerData& data)
cleanup_mainchain_data(data.height);
}
-data.aux_chains.clear();
-if (!m_mergeMiningClients.empty()) {
-data.aux_chains.reserve(m_mergeMiningClients.size());
-std::vector<hash> tmp;
-tmp.reserve(m_mergeMiningClients.size() + 1);
-for (const MergeMiningClient* c : m_mergeMiningClients) {
-data.aux_chains.emplace_back(c->aux_id(), c->aux_data(), c->aux_diff());
-tmp.emplace_back(c->aux_id());
-}
-tmp.emplace_back(m_sideChain->consensus_hash());
-if (!find_aux_nonce(tmp, data.aux_nonce)) {
-LOGERR(1, "Failed to find the aux nonce for merge mining. Merge mining will be off this round.");
-data.aux_chains.clear();
-}
-}
+update_aux_data(hash());
// TODO: remove after testing
#if 0
@@ -540,6 +527,56 @@ void p2pool::handle_chain_main(ChainMain& data, const char* extra)
m_zmqLastActive = seconds_since_epoch();
}
+void p2pool::update_aux_data(const hash& chain_id)
+{
+{
+WriteLock lock(m_minerDataLock);
+MinerData& data = m_minerData;
+const std::vector<AuxChainData> old_data = data.aux_chains;
+data.aux_chains.clear();
+{
+ReadLock lock2(m_mergeMiningClientsLock);
+if (!m_mergeMiningClients.empty()) {
+data.aux_chains.reserve(m_mergeMiningClients.size());
+std::vector<hash> tmp;
+tmp.reserve(m_mergeMiningClients.size() + 1);
+for (const MergeMiningClient* c : m_mergeMiningClients) {
+data.aux_chains.emplace_back(c->aux_id(), c->aux_data(), c->aux_diff());
+tmp.emplace_back(c->aux_id());
+}
+tmp.emplace_back(m_sideChain->consensus_hash());
+if (!find_aux_nonce(tmp, data.aux_nonce)) {
+LOGERR(1, "Failed to find the aux nonce for merge mining. Merge mining will be off this round.");
+data.aux_chains.clear();
+}
+}
+}
+if (old_data == data.aux_chains) {
+return;
+}
+}
+if (!chain_id.empty()) {
+LOGINFO(4, "New aux data from chain " << chain_id);
+}
+if (!is_main_thread()) {
+update_block_template_async();
+}
+else {
+update_block_template();
+}
+}
void p2pool::submit_block_async(uint32_t template_id, uint32_t nonce, uint32_t extra_nonce)
{
{
@@ -590,6 +627,8 @@ void p2pool::submit_block_async(std::vector<uint8_t>&& blob)
void p2pool::submit_aux_block(const hash& chain_id, uint32_t template_id, uint32_t nonce, uint32_t extra_nonce) const
{
+LOGINFO(3, "submit_aux_block: template id = " << template_id << ", chain_id = " << chain_id << ", nonce = " << nonce << ", extra_nonce = " << extra_nonce);
size_t nonce_offset = 0;
size_t extra_nonce_offset = 0;
size_t sidechain_id_offset = 0;
@@ -607,6 +646,8 @@ void p2pool::submit_aux_block(const hash& chain_id, uint32_t template_id, uint32
memcpy(p + extra_nonce_offset, &extra_nonce, EXTRA_NONCE_SIZE);
memcpy(p + sidechain_id_offset, sidechain_id.h, HASH_SIZE);
+ReadLock lock(m_mergeMiningClientsLock);
for (MergeMiningClient* c : m_mergeMiningClients) {
if (chain_id == c->aux_id()) {
std::vector<hash> proof;
@@ -900,8 +941,14 @@ void p2pool::download_block_headers(uint64_t current_height)
}
}
-for (const auto& h : m_params->m_mergeMiningHosts) {
-m_mergeMiningClients.push_back(new MergeMiningClient(this, h.m_host, h.m_wallet));
+{
+WriteLock lock(m_mergeMiningClientsLock);
+m_mergeMiningClients.clear();
+for (const auto& h : m_params->m_mergeMiningHosts) {
+m_mergeMiningClients.push_back(new MergeMiningClient(this, h.m_host, h.m_wallet));
+}
+}
m_startupFinished = true;
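Note: m_mergeMiningClients is now created, read and destroyed only under the new m_mergeMiningClientsLock, since the vector is touched from RPC callbacks, aux block submission and the startup/shutdown paths. p2pool's ReadLock/WriteLock appear to be RAII wrappers over libuv's uv_rwlock_t; the sketch below shows the same pattern with simplified guards and placeholder names (not p2pool's actual classes):

#include <uv.h>
#include <vector>

// Minimal RAII guards over uv_rwlock_t, in the spirit of p2pool's ReadLock/WriteLock.
struct ReadGuard {
	explicit ReadGuard(uv_rwlock_t& l) : m_lock(l) { uv_rwlock_rdlock(&m_lock); }
	~ReadGuard() { uv_rwlock_rdunlock(&m_lock); }
	uv_rwlock_t& m_lock;
};

struct WriteGuard {
	explicit WriteGuard(uv_rwlock_t& l) : m_lock(l) { uv_rwlock_wrlock(&m_lock); }
	~WriteGuard() { uv_rwlock_wrunlock(&m_lock); }
	uv_rwlock_t& m_lock;
};

struct ClientList {
	ClientList() { uv_rwlock_init(&m_lock); }
	~ClientList() {
		{
			WriteGuard g(m_lock);   // writers (add/clear/delete) take the write lock
			m_clients.clear();
		}
		uv_rwlock_destroy(&m_lock);
	}

	size_t count() {
		ReadGuard g(m_lock);        // readers (iteration, lookups) take the read lock
		return m_clients.size();
	}

	uv_rwlock_t m_lock;
	std::vector<int> m_clients;     // stands in for std::vector<MergeMiningClient*>
};

int main() { ClientList list; return static_cast<int>(list.count()); }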

View File

@@ -84,6 +84,8 @@ public:
virtual void handle_miner_data(MinerData& data) override;
virtual void handle_chain_main(ChainMain& data, const char* extra) override;
+void update_aux_data(const hash& chain_id);
void submit_block_async(uint32_t template_id, uint32_t nonce, uint32_t extra_nonce);
void submit_block_async(std::vector<uint8_t>&& blob);
@@ -230,6 +232,7 @@ private:
mutable uv_rwlock_t m_ZMQReaderLock;
ZMQReader* m_ZMQReader = nullptr;
+mutable uv_rwlock_t m_mergeMiningClientsLock;
std::vector<MergeMiningClient*> m_mergeMiningClients;
hash m_getMinerDataHash;

View File

@@ -403,6 +403,8 @@ bool StratumServer::on_submit(StratumClient* client, uint32_t id, const char* jo
if (aux_diff.check_pow(resultHash)) {
for (const AuxChainData& aux_data : block.get_aux_chains(template_id)) {
if (aux_data.difficulty.check_pow(resultHash)) {
+const char* s = client->m_customUser;
+LOGINFO(0, log::Green() << "client " << static_cast<char*>(client->m_addrString) << (*s ? " user " : "") << s << " found an aux block for chain_id " << aux_data.unique_id << ", diff " << aux_data.difficulty << ", submitting it");
m_pool->submit_aux_block(aux_data.unique_id, template_id, nonce, extra_nonce);
}
}

View File

@@ -14,7 +14,7 @@ class Server(http.server.BaseHTTPRequestHandler):
length = int(self.headers['content-length'])
request = self.rfile.read(length)
print(request.decode('utf-8'))
-request = json.loads(request)
+request = json.loads(request.decode('utf-8'))
self.send_response(200)
self.send_header('Content-type', 'application/json')