From ab7e98466de0653d8f281cd2f1445a952862c26c Mon Sep 17 00:00:00 2001
From: Andrey Kravchenko
Date: Fri, 25 Jul 2025 16:16:49 +0300
Subject: [PATCH 1/3] Use the minWorkers constant for the initial worker loop in scaling

Note: simplification. With the bound hardcoded to 8, a change to the
minWorkers constant could easily miss updating the loop range.
---
 storage/fetch.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/storage/fetch.go b/storage/fetch.go
index 5194e0d..e26bd24 100644
--- a/storage/fetch.go
+++ b/storage/fetch.go
@@ -121,7 +121,7 @@ func (f *PreFetcher) scaling() {
 
 	cancels := make([]context.CancelFunc, 0, maxWorkers)
 
-	for i := 0; i < 8; i++ {
+	for range minWorkers {
 		ctx, cancel := context.WithCancel(f.ctx)
 		cancels = append(cancels, cancel)
 		go f.worker(ctx)

From cd193b2bef3c641892e9947eceb0ac5a4ac17685 Mon Sep 17 00:00:00 2001
From: Andrey Kravchenko
Date: Fri, 25 Jul 2025 16:30:29 +0300
Subject: [PATCH 2/3] Fix incorrect reset of the fail counter when doing the ping op

Note: if ping returns an error but there have been fewer than 3 fails, the
counter is still reset to zero.
---
 storage/torrent.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/storage/torrent.go b/storage/torrent.go
index 30cfe08..62fd544 100644
--- a/storage/torrent.go
+++ b/storage/torrent.go
@@ -349,12 +349,13 @@ func (t *Torrent) peersManager(workerCtx context.Context) {
 				qCtx, cancel := context.WithTimeout(workerCtx, 5*time.Second)
 				defer cancel()
 
-				if err := peer.ping(qCtx); err != nil && atomic.AddInt32(&peer.fails, 1) > 3 {
+				err := peer.ping(qCtx)
+				if err != nil && atomic.AddInt32(&peer.fails, 1) > 3 {
 					Logger("[STORAGE_PEERS] PING FAILED FOR PEER", hex.EncodeToString(peer.nodeId), "AND TOO MANY FAILS, CLOSING CONNECTION", "BAG", hex.EncodeToString(t.BagID))
 					peer.Close()
-					return
+				} else if err == nil {
+					atomic.StoreInt32(&peer.fails, 0)
 				}
-				atomic.StoreInt32(&peer.fails, 0)
 			}()
 		} else if time.Since(peer.lastNeighboursAt) > 30*time.Second {
 			peer.lastNeighboursAt = time.Now()

From ff0a9c5126b3d330d0e8416cd324b74d8805c966 Mon Sep 17 00:00:00 2001
From: Andrey Kravchenko
Date: Fri, 25 Jul 2025 16:42:40 +0300
Subject: [PATCH 3/3] Remove iterator from perScaleWorkers loops

Removes a static analyzer warning.
---
 storage/fetch.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/storage/fetch.go b/storage/fetch.go
index e26bd24..6c69741 100644
--- a/storage/fetch.go
+++ b/storage/fetch.go
@@ -161,7 +161,7 @@ func (f *PreFetcher) scaling() {
 
 		if totalInPeriod > 0 && totalInPeriod > maxInPeriod && workers < maxWorkers {
 			maxInPeriod = totalInPeriod
-			for i := 0; i < perScaleWorkers; i++ {
+			for range perScaleWorkers {
 				ctx, cancel := context.WithCancel(f.ctx)
 				cancels = append(cancels, cancel)
 				go f.worker(ctx)
@@ -170,7 +170,7 @@ func (f *PreFetcher) scaling() {
 			Logger("[STORAGE_SCALER] ADDED WORKER, TOTAL:", len(cancels), "BAG", hex.EncodeToString(f.torrent.BagID), "MAX", maxInPeriod)
 		} else if totalInPeriod < maxInPeriod-maxInPeriod/4 && workers > minWorkers {
 			maxInPeriod = totalInPeriod
-			for i := 0; i < perScaleWorkers; i++ {
+			for range perScaleWorkers {
 				last := len(cancels) - 1
 				cancels[last]()
 				cancels = cancels[:last]
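
Patches 1 and 3 rely on Go 1.22's "range over integer" loop form. Below is a
minimal standalone sketch of that form, separate from the patches themselves;
the minWorkers value here is a stand-in (the old hardcoded bound suggests 8,
but the actual constant lives in storage/fetch.go).

package main

import "fmt"

// Stand-in for the constant defined in storage/fetch.go; 8 matches the old
// hardcoded bound but is an assumption here.
const minWorkers = 8

func main() {
	started := 0

	// Go 1.22+: "for range n" runs the body n times without declaring an
	// index variable, so the bound stays tied to the constant and there is
	// no iterator left over for a static analyzer to flag.
	for range minWorkers {
		started++
	}

	fmt.Println("workers started:", started) // prints: workers started: 8
}

Before Go 1.22 the equivalent spelling is "for i := 0; i < minWorkers; i++",
which is exactly what these patches replace.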
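
The rule patch 2 restores (increment the counter on every failed ping, reset
it only on success, close the peer once more than 3 consecutive failures
accumulate) can be sketched in isolation as below. This is a simplified
illustration with hypothetical names (pinger, onPing, maxFails), not the code
from storage/torrent.go.

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

const maxFails = 3 // threshold matching the "> 3" check in the patch

type pinger struct {
	fails int32 // consecutive ping failures
}

// onPing applies the corrected rule and reports whether the peer should be closed.
func (p *pinger) onPing(err error) bool {
	if err != nil {
		// Count the failure; close only once it exceeds the threshold.
		return atomic.AddInt32(&p.fails, 1) > maxFails
	}
	// Reset the counter only on a successful ping.
	atomic.StoreInt32(&p.fails, 0)
	return false
}

func main() {
	p := &pinger{}
	for _, err := range []error{errors.New("timeout"), errors.New("timeout"), nil, errors.New("timeout")} {
		fmt.Println("close:", p.onPing(err), "fails:", atomic.LoadInt32(&p.fails))
	}
	// close: false fails: 1
	// close: false fails: 2
	// close: false fails: 0  <- success resets the counter
	// close: false fails: 1
}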