diff --git a/crawler.go b/crawler.go
index 41d51be..beecb08 100644
--- a/crawler.go
+++ b/crawler.go
@@ -136,8 +136,6 @@ func crawlNode(s *dnsseeder, nd *node) {
 // crawlEnd is a deffered func to update theList after a crawl is all done
 func crawlEnd(nd *node) {
 	nd.crawlActive = false
-	// FIXME - scan for long term crawl active node. Dial timeout is 10 seconds
-	// so should be done in under 5 minutes
 }
 
 // crawlIP retrievs a slice of ip addresses from a client
diff --git a/main.go b/main.go
index 963f723..b99ff53 100644
--- a/main.go
+++ b/main.go
@@ -87,7 +87,6 @@ func main() {
 		os.Exit(1)
 	}
 	if nnw != nil {
-		// FIXME - lock this
 		config.seeders[nnw.name] = nnw
 		config.order = append(config.order, nnw.name)
 	}
@@ -145,7 +144,6 @@ func main() {
 			log.Printf("debug - Audit nodes timer triggered\n")
 		}
 		for _, s := range config.seeders {
-			// FIXME goroutines for these
 			s.auditNodes()
 		}
 	case <-dnsChan:
diff --git a/seeder.go b/seeder.go
index 75e3210..a4938da 100644
--- a/seeder.go
+++ b/seeder.go
@@ -393,6 +393,9 @@ func (s *dnsseeder) loadDNS() {
 
 // isFull returns true if the number of remote clients is more than we want to store
 func (s *dnsseeder) isFull() bool {
+	s.mtx.RLock()
+	defer s.mtx.RUnlock()
+
 	if len(s.theList) > s.maxSize {
 		return true
 	}
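
The seeder.go hunk wraps the read of `s.theList` in the seeder's RWMutex so that `isFull` cannot race with goroutines that add or remove nodes. Below is a minimal, self-contained sketch of that read-lock/write-lock pattern, assuming a reduced `dnsseeder` shape (`theList` as a map keyed by address, `maxSize` as an int, `mtx` as `sync.RWMutex`); the helper `addNode` and the field types are illustrative, not copied from the repository.

```go
package main

import (
	"fmt"
	"sync"
)

// node is a stand-in for the crawler's per-peer record (assumed shape).
type node struct {
	crawlActive bool
}

// dnsseeder is a reduced sketch of the seeder: theList is shared between
// the crawl, audit and DNS goroutines, so every access goes through mtx.
type dnsseeder struct {
	mtx     sync.RWMutex
	theList map[string]*node
	maxSize int
}

// isFull takes the read lock, mirroring the change in seeder.go: many
// readers may hold RLock at once, but they all exclude any writer.
func (s *dnsseeder) isFull() bool {
	s.mtx.RLock()
	defer s.mtx.RUnlock()

	return len(s.theList) > s.maxSize
}

// addNode (hypothetical helper) shows the matching write side: mutations
// of theList need the exclusive lock, otherwise the RLock in isFull
// provides no protection.
func (s *dnsseeder) addNode(addr string) {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	s.theList[addr] = &node{}
}

func main() {
	s := &dnsseeder{theList: make(map[string]*node), maxSize: 2}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			s.addNode(fmt.Sprintf("10.0.0.%d:8333", i))
		}(i)
	}
	wg.Wait()

	fmt.Println("full:", s.isFull()) // true once more than maxSize entries exist
}
```

Using RLock here keeps concurrent size checks cheap while still serializing them against writers that hold the full Lock.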