// Package stratum implements a Kevacoin stratum server for solo-mining.
package stratum

import (
	"bytes"
	"encoding/binary"
	"encoding/hex"
	"log"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"kevacoin.org/keva-stratum/v2/cnutil"
	"kevacoin.org/keva-stratum/v2/util"
)
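
// Job is a single mining job issued to a session. It tracks the extra nonce
// baked into the blob and the set of nonces already submitted for it, so
// duplicate submissions can be detected.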
type Job struct {
	height int64

	sync.RWMutex
	id          string
	extraNonce  uint32
	submissions map[string]struct{}
}
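
// Miner holds per-connection state for a single miner: heartbeat and share
// counters updated atomically, plus a timestamped map of accepted share
// difficulty used for hashrate estimation.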
type Miner struct {
	lastBeat      int64
	startedAt     int64
	validShares   int64
	invalidShares int64
	staleShares   int64
	accepts       int64
	rejects       int64
	shares        map[int64]int64
	sync.RWMutex
	id string
	ip string
}
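
// submit records a nonce for this job and reports whether it was already
// submitted (true means duplicate).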
func (job *Job) submit(nonce string) bool {
	job.Lock()
	defer job.Unlock()
	if _, exist := job.submissions[nonce]; exist {
		return true
	}
	job.submissions[nonce] = struct{}{}
	return false
}
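
// NewMiner creates a miner entry for the given worker id and IP address.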
func NewMiner(id string, ip string) *Miner {
	shares := make(map[int64]int64)
	return &Miner{id: id, ip: ip, shares: shares}
}
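
// getJob builds a job reply from the given block template. If the session has
// already seen this block height, an empty reply is returned and no new job is
// created. Otherwise a fresh extra nonce is reserved, the job is pushed onto
// the session's valid-job window, and the algorithm (cn/r or rx/keva) is
// selected from the blob's major version byte.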
func (cs *Session) getJob(t *BlockTemplate) *JobReplyData {
	height := atomic.SwapInt64(&cs.lastBlockHeight, t.height)

	if height == t.height {
		return &JobReplyData{}
	}

	extraNonce := atomic.AddUint32(&cs.endpoint.extraNonce, 1)
	blob := t.nextBlob(extraNonce, cs.endpoint.instanceId)
	id := atomic.AddUint64(&cs.endpoint.jobSequence, 1)
	job := &Job{
		id:         strconv.FormatUint(id, 10),
		extraNonce: extraNonce,
		height:     t.height,
	}
	job.submissions = make(map[string]struct{})

	cs.pushJob(job)
	majorVersion, err := strconv.ParseInt(blob[0:2], 16, 32)
	if err != nil {
		log.Printf("Failed to get major version: %v\n", err)
	}
	var algo string
	if majorVersion == 10 {
		algo = "cn/r"
	} else if majorVersion == 12 {
		algo = "rx/keva"
	}
	reply := &JobReplyData{
		JobId:        job.id,
		Blob:         blob,
		Target:       cs.endpoint.targetHex,
		Height:       t.height,
		Algo:         algo,
		SeedHash:     t.seedHash,
		NextSeedHash: t.nextSeedHash,
	}

	return reply
}
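
// pushJob appends a job to the session's list of valid jobs, keeping only the
// most recent four so that shares for slightly stale jobs can still be matched.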
func (cs *Session) pushJob(job *Job) {
	cs.Lock()
	defer cs.Unlock()
	cs.validJobs = append(cs.validJobs, job)

	if len(cs.validJobs) > 4 {
		cs.validJobs = cs.validJobs[1:]
	}
}
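
// findJob returns the session's job with the given id, or nil if it is no
// longer in the valid-job window.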
func (cs *Session) findJob(id string) *Job {
	cs.Lock()
	defer cs.Unlock()
	for _, job := range cs.validJobs {
		if job.id == id {
			return job
		}
	}
	return nil
}
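
// heartbeat records the current time as the miner's last activity.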
func (m *Miner) heartbeat() {
	now := util.MakeTimestamp()
	atomic.StoreInt64(&m.lastBeat, now)
}
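
// getLastBeat returns the timestamp of the miner's last recorded activity.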
func (m *Miner) getLastBeat() int64 {
	return atomic.LoadInt64(&m.lastBeat)
}
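
// storeShare adds the accepted share difficulty to the per-second bucket used
// for hashrate estimation.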
func (m *Miner) storeShare(diff int64) {
	now := util.MakeTimestamp() / 1000
	m.Lock()
	m.shares[now] += diff
	m.Unlock()
}
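
// hashrate estimates the miner's hashrate over the given window by summing the
// stored share difficulty per second; buckets older than 24 hours are pruned.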
func (m *Miner) hashrate(estimationWindow time.Duration) float64 {
	now := util.MakeTimestamp() / 1000
	totalShares := int64(0)
	window := int64(estimationWindow / time.Second)
	boundary := now - m.startedAt
	if boundary > window {
		boundary = window
	}
	m.Lock()
	for k, v := range m.shares {
		if k < now-86400 {
			delete(m.shares, k)
		} else if k >= now-window {
			totalShares += v
		}
	}
	m.Unlock()
	return float64(totalShares) / float64(boundary)
}
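
// processShare validates a submitted share: it rebuilds the block blob with the
// job's extra nonce and the miner's nonce, recomputes the hash (unless share
// validation is bypassed), and checks the resulting difficulty against the
// network and endpoint targets. Shares meeting the network difficulty are
// submitted upstream as candidate blocks. It returns whether the share counts
// as valid.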
func (m *Miner) processShare(s *StratumServer, cs *Session, job *Job, t *BlockTemplate, nonce string, result string) bool {
	r := s.rpc()
	shareBuff := make([]byte, len(t.buffer))
	copy(shareBuff, t.buffer)
	copy(shareBuff[t.reservedOffset+4:t.reservedOffset+7], cs.endpoint.instanceId)

	extraBuff := new(bytes.Buffer)
	binary.Write(extraBuff, binary.BigEndian, job.extraNonce)
	copy(shareBuff[t.reservedOffset:], extraBuff.Bytes())

	nonceBuff, _ := hex.DecodeString(nonce)
	copy(shareBuff[39:], nonceBuff)
	var hashBytes, convertedBlob []byte

	if s.config.BypassShareValidation {
		hashBytes, _ = hex.DecodeString(result)
	} else {
		convertedBlob = cnutil.ConvertBlob(shareBuff)
		if len(t.seedHash) == 0 {
			// cn/r
			hashBytes = cnutil.Hash(convertedBlob, false, int(t.height), "")
		} else {
			// rx/keva
			hashBytes = cnutil.Hash(convertedBlob, false, int(t.height), t.seedHash)
		}
	}
	if !s.config.BypassShareValidation && hex.EncodeToString(hashBytes) != result {
		log.Printf("Bad hash from miner %v@%v", m.id, cs.ip)
		atomic.AddInt64(&m.invalidShares, 1)
		return false
	}

	hashDiff, ok := util.GetHashDifficulty(hashBytes)
	if !ok {
		log.Printf("Bad hash from miner %v@%v", m.id, cs.ip)
		atomic.AddInt64(&m.invalidShares, 1)
		return false
	}
	block := hashDiff.Cmp(t.difficulty) >= 0

	if block {
		_, err := r.SubmitBlock(hex.EncodeToString(shareBuff))
		if err != nil {
			atomic.AddInt64(&m.rejects, 1)
			atomic.AddInt64(&r.Rejects, 1)
			log.Printf("Block rejected at height %d: %v", t.height, err)
		} else {
			if len(convertedBlob) == 0 {
				convertedBlob = cnutil.ConvertBlob(shareBuff)
			}
			blockFastHash := hex.EncodeToString(util.ReverseBytes(cnutil.FastHash(convertedBlob)))
			now := util.MakeTimestamp()
			roundShares := atomic.SwapInt64(&s.roundShares, 0)
			ratio := float64(roundShares) / float64(t.diffInt64)
			s.blocksMu.Lock()
			s.blockStats[now] = blockEntry{height: t.height, hash: blockFastHash, variance: ratio}
			s.blocksMu.Unlock()
			atomic.AddInt64(&m.accepts, 1)
			atomic.AddInt64(&r.Accepts, 1)
			atomic.StoreInt64(&r.LastSubmissionAt, now)
			log.Printf("Block %s found at height %d by miner %v@%v with ratio %.4f", blockFastHash[0:6], t.height, m.id, cs.ip, ratio)
			// Immediately refresh current BT and send new jobs
			s.refreshBlockTemplate(true)
		}
	} else if hashDiff.Cmp(cs.endpoint.difficulty) < 0 {
		log.Printf("Rejected low difficulty share of %v from %v@%v", hashDiff, m.id, cs.ip)
		atomic.AddInt64(&m.invalidShares, 1)
		return false
	}
	atomic.AddInt64(&s.roundShares, cs.endpoint.config.Difficulty)
	atomic.AddInt64(&m.validShares, 1)
	m.storeShare(cs.endpoint.config.Difficulty)
	log.Printf("Valid share at difficulty %v/%v", cs.endpoint.config.Difficulty, hashDiff)

	return true
}