diff --git a/block/internal/da/async_block_retriever_test.go b/block/internal/da/async_block_retriever_test.go
index 82525ea31f..4464498fc7 100644
--- a/block/internal/da/async_block_retriever_test.go
+++ b/block/internal/da/async_block_retriever_test.go
@@ -88,7 +88,7 @@ func TestAsyncBlockRetriever_FetchAndCache(t *testing.T) {
 	var err error
 
 	// Poll for up to 2 seconds for the block to be cached
-	for i := 0; i < 40; i++ {
+	for range 40 {
 		block, err = fetcher.GetCachedBlock(ctx, 100)
 		require.NoError(t, err)
 		if block != nil {
diff --git a/block/internal/da/client_test.go b/block/internal/da/client_test.go
index e0301874ba..b71ea6570f 100644
--- a/block/internal/da/client_test.go
+++ b/block/internal/da/client_test.go
@@ -185,7 +185,7 @@ func TestClient_Get(t *testing.T) {
 
 		blobs := make([]*blobrpc.Blob, 3)
 		ids := make([]datypes.ID, 3)
-		for i := 0; i < 3; i++ {
+		for i := range 3 {
 			blb, err := blobrpc.NewBlobV0(ns, []byte{byte(i)})
 			require.NoError(t, err)
 			blobs[i] = blb
@@ -203,7 +203,7 @@ func TestClient_Get(t *testing.T) {
 		result, err := cl.Get(context.Background(), ids, nsBz)
 		require.NoError(t, err)
 		require.Len(t, result, 3)
-		for i := 0; i < 3; i++ {
+		for i := range 3 {
 			assert.Equal(t, blobs[i].Data(), result[i])
 		}
 	})
@@ -235,7 +235,7 @@ func TestClient_GetProofs(t *testing.T) {
 
 	blobModule := mocks.NewMockBlobModule(t)
 	ids := make([]datypes.ID, 3)
-	for i := 0; i < 3; i++ {
+	for i := range 3 {
 		blb, _ := blobrpc.NewBlobV0(ns, []byte{byte(i)})
 		ids[i] = blobrpc.MakeID(uint64(200+i), blb.Commitment)
 		blobModule.On("GetProof", mock.Anything, uint64(200+i), ns, blb.Commitment).Return(&blobrpc.Proof{}, nil).Once()
@@ -263,7 +263,7 @@ func TestClient_Validate(t *testing.T) {
 
 	ids := make([]datypes.ID, 3)
 	proofs := make([]datypes.Proof, 3)
-	for i := 0; i < 3; i++ {
+	for i := range 3 {
 		blb, _ := blobrpc.NewBlobV0(ns, []byte{byte(i)})
 		ids[i] = blobrpc.MakeID(uint64(300+i), blb.Commitment)
 		proofBz, _ := json.Marshal(&blobrpc.Proof{})
@@ -281,7 +281,7 @@ func TestClient_Validate(t *testing.T) {
 		results, err := cl.Validate(context.Background(), ids, proofs, nsBz)
 		require.NoError(t, err)
 		require.Len(t, results, 3)
-		for i := 0; i < 3; i++ {
+		for i := range 3 {
 			assert.Equal(t, i%2 == 0, results[i])
 		}
 	})
diff --git a/block/internal/executing/executor_restart_test.go b/block/internal/executing/executor_restart_test.go
index e5c3b6af40..51d940a630 100644
--- a/block/internal/executing/executor_restart_test.go
+++ b/block/internal/executing/executor_restart_test.go
@@ -316,7 +316,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) {
 
 	lastStateRoot := initStateRoot
 	for i := range numBlocks {
-		newStateRoot := []byte(fmt.Sprintf("new_root_%d", i+1))
+		newStateRoot := fmt.Appendf(nil, "new_root_%d", i+1)
 		mockExec1.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, gen.InitialHeight+uint64(i), mock.AnythingOfType("time.Time"), lastStateRoot).
 			Return(newStateRoot, nil).Once()
 		lastStateRoot = newStateRoot
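Note: the loop rewrites in this patch use Go 1.22's range-over-int form: "for i := range n" yields i = 0..n-1, and "for range 40" (as in the retriever test above) repeats without declaring an index at all. A minimal, self-contained sketch; the names are illustrative, not from this repo:

    package main

    import "fmt"

    func main() {
    	// i takes the values 0, 1, ..., n-1, exactly like i := 0; i < n; i++
    	for i := range 3 {
    		fmt.Println("iteration", i)
    	}
    	// when the index is unused, omit it entirely; an unused i in the
    	// range form would otherwise be a "declared and not used" error
    	for range 2 {
    		fmt.Println("repeat")
    	}
    }
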
diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go
index bd8088f7b6..31c2681c91 100644
--- a/block/internal/submitting/da_submitter.go
+++ b/block/internal/submitting/da_submitter.go
@@ -692,7 +692,7 @@ func submitToDA[T any](
 func limitBatchBySize[T any](items []T, marshaled [][]byte, maxBytes int) ([]T, [][]byte, error) {
 	total := 0
 	count := 0
-	for i := 0; i < len(items); i++ {
+	for i := range items {
 		sz := len(marshaled[i])
 		if sz > maxBytes {
 			if i == 0 {
diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go
index 2270463b9c..57301fa76f 100644
--- a/block/internal/syncing/da_retriever_test.go
+++ b/block/internal/syncing/da_retriever_test.go
@@ -64,7 +64,7 @@ func makeSignedDataBytesWithTime(t *testing.T, chainID string, height uint64, pr
 	d := &types.Data{Metadata: &types.Metadata{ChainID: chainID, Height: height, Time: timestamp}}
 	if txs > 0 {
 		d.Txs = make(types.Txs, txs)
-		for i := 0; i < txs; i++ {
+		for i := range txs {
 			d.Txs[i] = types.Tx([]byte{byte(height), byte(i)})
 		}
 	}
diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go
index a6529d5562..f83ef07829 100644
--- a/block/internal/syncing/syncer_benchmark_test.go
+++ b/block/internal/syncing/syncer_benchmark_test.go
@@ -122,7 +122,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay
 
 	// prepare height events to emit
 	heightEvents := make([]common.DAHeightEvent, totalHeights)
-	for i := uint64(0); i < totalHeights; i++ {
+	for i := range totalHeights {
 		blockHeight, daHeight := i+gen.InitialHeight, i+daHeightOffset
 		_, sh := makeSignedHeaderBytes(b, gen.ChainID, blockHeight, addr, pub, signer, nil, nil, nil)
 		d := &types.Data{Metadata: &types.Metadata{ChainID: gen.ChainID, Height: blockHeight, Time: uint64(time.Now().UnixNano())}}
@@ -137,7 +137,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay
 	// Mock DA retriever to emit exactly totalHeights events, then HFF and cancel
 	daR := NewMockDARetriever(b)
 	daR.On("PopPriorityHeight").Return(uint64(0)).Maybe()
-	for i := uint64(0); i < totalHeights; i++ {
+	for i := range totalHeights {
 		daHeight := i + daHeightOffset
 		daR.On("RetrieveFromDA", mock.Anything, daHeight).
 			Run(func(_ mock.Arguments) {
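Note: in newBenchFixture above, totalHeights is a uint64, and ranging over an integer keeps its type, so i is a uint64 and expressions like i+gen.InitialHeight need no conversion (the old loop had to spell out i := uint64(0)). Ranging over a slice, as in limitBatchBySize, still yields int indices. A small sketch of both, with hypothetical names:

    package main

    import "fmt"

    func main() {
    	var total uint64 = 2
    	// the index adopts the type of the range operand: uint64 here
    	for i := range total {
    		fmt.Printf("%T %d\n", i, i) // uint64 0, uint64 1
    	}

    	items := []string{"a", "b", "c"}
    	// over a slice the index is an int, replacing i < len(items)
    	for i := range items {
    		fmt.Println(i, items[i])
    	}
    }
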
diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go
index 075b6f1694..3c03992fce 100644
--- a/block/internal/syncing/syncer_forced_inclusion_test.go
+++ b/block/internal/syncing/syncer_forced_inclusion_test.go
@@ -137,7 +137,7 @@ func TestUpdateDynamicGracePeriod_IncreaseOnHighFullness(t *testing.T) {
 	s.blockFullnessEMA.Store(&initialEMA)
 
 	// Update multiple times with very high fullness to build up the effect
-	for i := 0; i < 20; i++ {
+	for range 20 {
 		s.updateDynamicGracePeriod(0.95)
 	}
 
@@ -169,7 +169,7 @@ func TestUpdateDynamicGracePeriod_DecreaseOnLowFullness(t *testing.T) {
 	s.blockFullnessEMA.Store(&initialEMA)
 
 	// Update multiple times with low fullness to build up the effect
-	for i := 0; i < 20; i++ {
+	for range 20 {
 		s.updateDynamicGracePeriod(0.2)
 	}
 
@@ -201,7 +201,7 @@ func TestUpdateDynamicGracePeriod_ClampToMin(t *testing.T) {
 	s.blockFullnessEMA.Store(&initialEMA)
 
 	// Update many times with very low fullness - should eventually clamp to min
-	for i := 0; i < 50; i++ {
+	for range 50 {
 		s.updateDynamicGracePeriod(0.0)
 	}
 
@@ -228,7 +228,7 @@ func TestUpdateDynamicGracePeriod_ClampToMax(t *testing.T) {
 	s.blockFullnessEMA.Store(&initialEMA)
 
 	// Update many times with very high fullness - should eventually clamp to max
-	for i := 0; i < 50; i++ {
+	for range 50 {
 		s.updateDynamicGracePeriod(1.0)
 	}
 
@@ -316,7 +316,7 @@ func TestDynamicGracePeriod_Integration_HighCongestion(t *testing.T) {
 	s.blockFullnessEMA.Store(&initialEMA)
 
 	// Simulate processing many blocks with very high fullness (above threshold)
-	for i := 0; i < 50; i++ {
+	for range 50 {
 		s.updateDynamicGracePeriod(0.95)
 	}
 
@@ -349,7 +349,7 @@ func TestDynamicGracePeriod_Integration_LowCongestion(t *testing.T) {
 	s.blockFullnessEMA.Store(&initialEMA)
 
 	// Simulate processing many blocks with very low fullness (below threshold)
-	for i := 0; i < 50; i++ {
+	for range 50 {
 		s.updateDynamicGracePeriod(0.1)
 	}
 
diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go
index c92f34e3c4..14cc539fdc 100644
--- a/block/internal/syncing/syncer_test.go
+++ b/block/internal/syncing/syncer_test.go
@@ -94,7 +94,7 @@ func makeData(chainID string, height uint64, txs int) *types.Data {
 	}
 	if txs > 0 {
 		d.Txs = make(types.Txs, txs)
-		for i := 0; i < txs; i++ {
+		for i := range txs {
 			d.Txs[i] = types.Tx([]byte{byte(height), byte(i)})
 		}
 	}
@@ -918,8 +918,7 @@ func TestSyncer_Stop_SkipsDrainOnCriticalError(t *testing.T) {
 	s.hasCriticalError.Store(true)
 
 	// Start a no-op goroutine tracked by the WaitGroup so Stop() doesn't block on wg.Wait()
-	s.wg.Add(1)
-	go func() { defer s.wg.Done() }()
+	s.wg.Go(func() {})
 
 	// Stop must complete quickly — no drain, no ExecuteTxs calls
 	done := make(chan struct{})
@@ -991,8 +990,7 @@ func TestSyncer_Stop_DrainWorksWithoutCriticalError(t *testing.T) {
 	s.heightInCh <- evt
 
 	// hasCriticalError is false (default) — drain should process events including ExecuteTxs
-	s.wg.Add(1)
-	go func() { defer s.wg.Done() }()
+	s.wg.Go(func() {})
 
 	_ = s.Stop()
 
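Note: the two syncer_test changes replace the manual wg.Add(1) / go func() { defer wg.Done() }() pair with sync.WaitGroup.Go, added in Go 1.25, which increments the counter, runs the function in a new goroutine, and calls Done when it returns. A minimal sketch:

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	var wg sync.WaitGroup
    	for i := range 3 {
    		// equivalent to wg.Add(1); go func() { defer wg.Done(); ... }()
    		wg.Go(func() {
    			fmt.Println("worker", i)
    		})
    	}
    	wg.Wait()
    }
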
diff --git a/node/helpers_test.go b/node/helpers_test.go
index 2127ef9aaf..c2d9983195 100644
--- a/node/helpers_test.go
+++ b/node/helpers_test.go
@@ -282,7 +282,7 @@ func newTestP2PClient(config evconfig.Config, privKey crypto.PrivKey, ds datasto
 func createNodeContexts(n int) ([]context.Context, []context.CancelFunc) {
 	ctxs := make([]context.Context, n)
 	cancels := make([]context.CancelFunc, n)
-	for i := 0; i < n; i++ {
+	for i := range n {
 		ctx, cancel := context.WithCancel(context.Background())
 		ctxs[i] = ctx
 		cancels[i] = cancel
diff --git a/pkg/da/jsonrpc/header.go b/pkg/da/jsonrpc/header.go
index 573d01cb33..abcc2784a9 100644
--- a/pkg/da/jsonrpc/header.go
+++ b/pkg/da/jsonrpc/header.go
@@ -14,7 +14,7 @@ type Header struct {
 	Height    uint64    `json:"height,string,omitempty"`
 	LastHash  []byte    `json:"last_header_hash,omitempty"`
 	ChainID   string    `json:"chain_id,omitempty"`
-	BlockTime time.Time `json:"time,omitempty"`
+	BlockTime time.Time `json:"time"`
 }
 
 // RawHeader contains the raw tendermint header fields.
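Note: dropping omitempty from the BlockTime tag above removes a promise that encoding/json never kept: omitempty does not apply to struct-typed fields such as time.Time, so the zero time was marshaled either way. The new tag simply stops implying otherwise. A standalone demonstration, with a hypothetical type name:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"
    )

    type header struct {
    	BlockTime time.Time `json:"time,omitempty"`
    }

    func main() {
    	out, _ := json.Marshal(header{})
    	// omitempty is ignored for struct types:
    	// prints {"time":"0001-01-01T00:00:00Z"}
    	fmt.Println(string(out))
    }
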
diff --git a/pkg/da/selector_test.go b/pkg/da/selector_test.go
index 6eda8c1dd6..ac3bd8f103 100644
--- a/pkg/da/selector_test.go
+++ b/pkg/da/selector_test.go
@@ -29,7 +29,7 @@ func TestRoundRobinSelector_SingleAddress(t *testing.T) {
 	selector := NewRoundRobinSelector(addresses)
 
 	// All calls should return the same address
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		addr := selector.Next()
 		assert.Equal(t, "celestia1abc123", addr, "should always return the single address")
 	}
@@ -69,11 +69,11 @@ func TestRoundRobinSelector_Concurrent(t *testing.T) {
 	var wg sync.WaitGroup
 
 	// Launch concurrent goroutines
-	for i := 0; i < numGoroutines; i++ {
+	for i := range numGoroutines {
 		wg.Add(1)
 		go func(start int) {
 			defer wg.Done()
-			for j := 0; j < numCallsPerGoroutine; j++ {
+			for j := range numCallsPerGoroutine {
 				addr := selector.Next()
 				results[start+j] = addr
 			}
@@ -110,7 +110,7 @@ func TestRoundRobinSelector_WrapAround(t *testing.T) {
 
 	// Test wrap around behavior with large number of calls
 	seen := make(map[string]int)
-	for i := 0; i < 1000; i++ {
+	for range 1000 {
 		addr := selector.Next()
 		seen[addr]++
 	}
@@ -124,7 +124,7 @@ func TestNoOpSelector(t *testing.T) {
 	selector := NewNoOpSelector()
 
 	// Should always return empty string
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		addr := selector.Next()
 		assert.Empty(t, addr, "NoOpSelector should always return empty string")
 	}
@@ -136,9 +136,9 @@ func TestNoOpSelector_Concurrent(t *testing.T) {
 	const numGoroutines = 50
 
 	var wg sync.WaitGroup
-	for i := 0; i < numGoroutines; i++ {
+	for range numGoroutines {
 		wg.Go(func() {
-			for j := 0; j < 100; j++ {
+			for range 100 {
 				addr := selector.Next()
 				assert.Empty(t, addr)
 			}
diff --git a/pkg/p2p/client_test.go b/pkg/p2p/client_test.go
index 711dfaeb25..607ce6ce29 100644
--- a/pkg/p2p/client_test.go
+++ b/pkg/p2p/client_test.go
@@ -296,7 +296,7 @@ func TestClientInfoMethods(t *testing.T) {
 	var hosts []host.Host
 	var err error
 
-	for i := 0; i < 3; i++ {
+	for range 3 {
 		nodeKey, e := key.GenerateNodeKey()
 		require.NoError(e)
 		h, e := mn.AddPeer(nodeKey.PrivKey, multiaddr.StringCast("/ip4/127.0.0.1/tcp/0"))
diff --git a/pkg/p2p/utils_test.go b/pkg/p2p/utils_test.go
index e14a621233..b26cd5173e 100644
--- a/pkg/p2p/utils_test.go
+++ b/pkg/p2p/utils_test.go
@@ -70,7 +70,7 @@ func startTestNetwork(ctx context.Context, t *testing.T, n int, conf map[int]hos
 	require := require.New(t)
 	mnet := mocknet.New()
 
-	for i := 0; i < n; i++ {
+	for i := range n {
 		var descr hostDescr
 		if d, ok := conf[i]; ok {
 			descr = d
diff --git a/pkg/raft/node.go b/pkg/raft/node.go
index 224b749503..ada6838560 100644
--- a/pkg/raft/node.go
+++ b/pkg/raft/node.go
@@ -306,7 +306,7 @@ func (n *Node) SetApplyCallback(ch chan<- RaftApplyMsg) {
 }
 
 // Apply implements raft.FSM
-func (f *FSM) Apply(log *raft.Log) interface{} {
+func (f *FSM) Apply(log *raft.Log) any {
 	var state RaftBlockState
 	if err := proto.Unmarshal(log.Data, &state); err != nil {
 		f.logger.Error().Err(err).Msg("unmarshal block state")
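Note: tests such as TestRoundRobinSelector_Concurrent still hand the index to the goroutine as an explicit parameter (go func(start int)). Since Go 1.22 each loop iteration gets a fresh variable, so capturing i directly is also safe; the explicit parameter is now a readability choice rather than a data-race guard. Sketch:

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	var wg sync.WaitGroup
    	for i := range 3 {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			// safe since Go 1.22: i is per-iteration, not shared
    			fmt.Println("got", i)
    		}()
    	}
    	wg.Wait()
    }
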
diff --git a/pkg/rpc/server/da_visualization_test.go b/pkg/rpc/server/da_visualization_test.go
index 7d6b28c81b..9d879cebcf 100644
--- a/pkg/rpc/server/da_visualization_test.go
+++ b/pkg/rpc/server/da_visualization_test.go
@@ -73,7 +73,7 @@ func TestRecordSubmissionMemoryLimit(t *testing.T) {
 	server := NewDAVisualizationServer(da, logger, true)
 
 	// Add 101 submissions (more than the limit of 100)
-	for i := 0; i < 101; i++ {
+	for i := range 101 {
 		result := &coreda.ResultSubmit{
 			BaseResult: coreda.BaseResult{
 				Code: coreda.StatusSuccess,
diff --git a/pkg/sequencers/common/checkpoint_test.go b/pkg/sequencers/common/checkpoint_test.go
index 88b59ad888..84dcf4c7a2 100644
--- a/pkg/sequencers/common/checkpoint_test.go
+++ b/pkg/sequencers/common/checkpoint_test.go
@@ -120,7 +120,7 @@ func TestCheckpointStore_ConcurrentAccess(t *testing.T) {
 
 	// Test concurrent reads
 	done := make(chan bool, 10)
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		go func() {
 			defer func() { done <- true }()
 			loaded, err := store.Load(ctx)
@@ -129,7 +129,7 @@ func TestCheckpointStore_ConcurrentAccess(t *testing.T) {
 		}()
 	}
 
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		<-done
 	}
 }
diff --git a/pkg/sequencers/single/queue_test.go b/pkg/sequencers/single/queue_test.go
index f344f27ec0..259b96aa23 100644
--- a/pkg/sequencers/single/queue_test.go
+++ b/pkg/sequencers/single/queue_test.go
@@ -24,7 +24,7 @@ import (
 // createTestBatch creates a batch with dummy transactions for testing
 func createTestBatch(t *testing.T, txCount int) coresequencer.Batch {
 	txs := make([][]byte, txCount)
-	for i := 0; i < txCount; i++ {
+	for i := range txCount {
 		txs[i] = []byte{byte(i), byte(i + 1), byte(i + 2)}
 	}
 	return coresequencer.Batch{Transactions: txs}
 }
@@ -353,7 +353,7 @@ func TestConcurrency(t *testing.T) {
 	addWg := new(sync.WaitGroup)
 	addWg.Add(numOperations)
 
-	for i := 0; i < numOperations; i++ {
+	for i := range numOperations {
 		go func(index int) {
 			defer addWg.Done()
 			batch := createTestBatch(t, index%10+1) // 1-10 transactions
@@ -377,7 +377,7 @@ func TestConcurrency(t *testing.T) {
 	nextCount := numOperations / 2
 	nextWg.Add(nextCount)
 
-	for i := 0; i < nextCount; i++ {
+	for range nextCount {
 		go func() {
 			defer nextWg.Done()
 			batch, err := bq.Next(ctx)
@@ -499,7 +499,7 @@ func TestBatchQueue_QueueLimit_WithNext(t *testing.T) {
 	ctx := context.Background()
 
 	// Fill the queue to capacity
-	for i := 0; i < maxSize; i++ {
+	for i := range maxSize {
 		batch := createTestBatch(t, i+1)
 		err := bq.AddBatch(ctx, batch)
 		if err != nil {
@@ -562,11 +562,11 @@ func TestBatchQueue_QueueLimit_Concurrency(t *testing.T) {
 	var errorCount int64
 
 	// Start multiple workers trying to add batches concurrently
-	for i := 0; i < numWorkers; i++ {
+	for i := range numWorkers {
 		wg.Add(1)
 		go func(workerID int) {
 			defer wg.Done()
-			for j := 0; j < batchesPerWorker; j++ {
+			for j := range batchesPerWorker {
 				batch := createTestBatch(t, workerID*batchesPerWorker+j+1)
 				err := bq.AddBatch(ctx, batch)
 				if err != nil {
diff --git a/pkg/sequencers/single/sequencer_test.go b/pkg/sequencers/single/sequencer_test.go
index 04d7f88721..dd32edfc07 100644
--- a/pkg/sequencers/single/sequencer_test.go
+++ b/pkg/sequencers/single/sequencer_test.go
@@ -775,7 +775,7 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) {
 
 	// Phase 1: Normal operation - send some batches successfully
 	t.Log("Phase 1: Normal operation")
-	for i := 0; i < queueSize; i++ {
+	for i := range queueSize {
 		batch := createTestBatch(t, i+1)
 		req := coresequencer.SubmitBatchTxsRequest{
 			Id: []byte("test-chain"),
@@ -825,7 +825,7 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) {
 
 	// Add batches until queue is full again
 	batchesAdded := 0
-	for i := 0; i < 10; i++ { // Try to add many batches
+	for i := range 10 { // Try to add many batches
 		batch := createTestBatch(t, 100+i)
 		req := coresequencer.SubmitBatchTxsRequest{
 			Id: []byte("test-chain"),
@@ -1299,7 +1299,7 @@ func TestSequencer_GetNextBatch_GasFilteringPreservesUnprocessedTxs(t *testing.T
 
 	// Process multiple batches to consume all forced txs
 	// Use maxBytes=120 to fetch only 2 txs at a time (each is 50 bytes)
-	for i := 0; i < 5; i++ { // Max 5 iterations to prevent infinite loop
+	for i := range 5 { // Max 5 iterations to prevent infinite loop
 		req := coresequencer.GetNextBatchRequest{
 			Id:       []byte("test-gas-preserve"),
 			MaxBytes: 120, // Limits to ~2 txs per batch
diff --git a/pkg/signer/file/file_signer_test.go b/pkg/signer/file/file_signer_test.go
index f33a1f854a..ec24e07186 100644
--- a/pkg/signer/file/file_signer_test.go
+++ b/pkg/signer/file/file_signer_test.go
@@ -391,9 +391,9 @@ func TestConcurrentAccess(t *testing.T) {
 	errChan := make(chan error, numGoRoutines*messageCount)
 	doneChan := make(chan struct{})
 
-	for i := 0; i < numGoRoutines; i++ {
+	for i := range numGoRoutines {
 		go func(routineNum int) {
-			for j := 0; j < messageCount; j++ {
+			for j := range messageCount {
 				// Create a unique message per goroutine and iteration
 				message := fmt.Appendf(nil, "Message %d-%d", routineNum, j)
 
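Note: fmt.Appendf (Go 1.19) formats straight into a byte slice; with a nil destination, as in the executor_restart and file_signer changes above, it replaces the []byte(fmt.Sprintf(...)) round-trip without allocating the intermediate string. Sketch:

    package main

    import "fmt"

    func main() {
    	// equivalent to []byte(fmt.Sprintf("Message %d-%d", 1, 2)),
    	// minus the intermediate string allocation
    	msg := fmt.Appendf(nil, "Message %d-%d", 1, 2)
    	fmt.Printf("%T %q\n", msg, msg)

    	// it can also append to an existing buffer
    	buf := []byte("root: ")
    	buf = fmt.Appendf(buf, "new_root_%d", 7)
    	fmt.Println(string(buf))
    }
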
diff --git a/pkg/store/cached_store_test.go b/pkg/store/cached_store_test.go
index 1ce4266713..2579eb48a6 100644
--- a/pkg/store/cached_store_test.go
+++ b/pkg/store/cached_store_test.go
@@ -74,7 +74,7 @@ func TestCachedStore_GetHeader_MultipleHeights(t *testing.T) {
 
 	// Load multiple heights
 	headers := make([]*types.SignedHeader, 5)
-	for i := 0; i < 5; i++ {
+	for i := range 5 {
 		h := uint64(i + 1)
 		header, err := cachedStore.GetHeader(ctx, h)
 		require.NoError(t, err)
@@ -84,7 +84,7 @@ func TestCachedStore_GetHeader_MultipleHeights(t *testing.T) {
 	}
 
 	// Re-access all heights - should return same cached references
-	for i := 0; i < 5; i++ {
+	for i := range 5 {
 		h := uint64(i + 1)
 		header, err := cachedStore.GetHeader(ctx, h)
 		require.NoError(t, err)
diff --git a/pkg/store/store_adapter_test.go b/pkg/store/store_adapter_test.go
index 21c42370b2..c0427ddbf0 100644
--- a/pkg/store/store_adapter_test.go
+++ b/pkg/store/store_adapter_test.go
@@ -270,10 +270,10 @@ func TestPendingCache_ConcurrentAccess(t *testing.T) {
 	wg.Add(numGoroutines * 3) // readers, writers, deleters
 
 	// Writers
-	for i := 0; i < numGoroutines; i++ {
+	for i := range numGoroutines {
 		go func(offset int) {
 			defer wg.Done()
-			for j := 0; j < numOpsPerGoroutine; j++ {
+			for j := range numOpsPerGoroutine {
 				height := uint64(offset*numOpsPerGoroutine + j + 1)
 				h, _ := types.GetRandomBlock(height, 1, "test-chain")
 				cache.add(&types.P2PSignedHeader{SignedHeader: h})
@@ -282,10 +282,10 @@ func TestPendingCache_ConcurrentAccess(t *testing.T) {
 	}
 
 	// Readers
-	for i := 0; i < numGoroutines; i++ {
+	for range numGoroutines {
 		go func() {
 			defer wg.Done()
-			for j := 0; j < numOpsPerGoroutine; j++ {
+			for range numOpsPerGoroutine {
 				_ = cache.len()
 				_ = cache.getMaxHeight()
 				_, _ = cache.head()
@@ -295,10 +295,10 @@ func TestPendingCache_ConcurrentAccess(t *testing.T) {
 	}
 
 	// Deleters (delete some items)
-	for i := 0; i < numGoroutines; i++ {
+	for i := range numGoroutines {
 		go func(offset int) {
 			defer wg.Done()
-			for j := 0; j < numOpsPerGoroutine/2; j++ {
+			for j := range numOpsPerGoroutine / 2 {
 				height := uint64(offset*numOpsPerGoroutine + j + 1)
 				cache.delete(height)
 			}
@@ -327,7 +327,7 @@ func TestStoreAdapter_Backpressure(t *testing.T) {
 	// Create many items
 	const numItems = 100
 	items := make([]*types.P2PSignedHeader, numItems)
-	for i := 0; i < numItems; i++ {
+	for i := range numItems {
 		h, _ := types.GetRandomBlock(uint64(i+1), 1, "test-chain")
 		items[i] = &types.P2PSignedHeader{SignedHeader: h}
 	}
@@ -552,10 +552,10 @@ func TestStoreAdapter_ConcurrentAppendAndRead(t *testing.T) {
 	wg.Add(numWriters + numReaders)
 
 	// Writers
-	for w := 0; w < numWriters; w++ {
+	for w := range numWriters {
 		go func(writerID int) {
 			defer wg.Done()
-			for i := 0; i < itemsPerWriter; i++ {
+			for i := range itemsPerWriter {
 				height := uint64(writerID*itemsPerWriter + i + 1)
 				h, _ := types.GetRandomBlock(height, 1, "test-chain")
 				_ = adapter.Append(ctx, &types.P2PSignedHeader{SignedHeader: h})
@@ -564,10 +564,10 @@ func TestStoreAdapter_ConcurrentAppendAndRead(t *testing.T) {
 	}
 
 	// Readers
-	for r := 0; r < numReaders; r++ {
+	for range numReaders {
 		go func() {
 			defer wg.Done()
-			for i := 0; i < itemsPerWriter*numWriters; i++ {
+			for i := range itemsPerWriter * numWriters {
 				_ = adapter.Height()
 				_, _ = adapter.Head(ctx)
 				_ = adapter.HasAt(ctx, uint64(i+1))
diff --git a/pkg/store/store_test.go b/pkg/store/store_test.go
index 08ead91074..bd5e952f45 100644
--- a/pkg/store/store_test.go
+++ b/pkg/store/store_test.go
@@ -323,11 +323,11 @@ func TestMetadata(t *testing.T) {
 	}
 
 	const n = 5
-	for i := 0; i < n; i++ {
+	for i := range n {
 		require.NoError(s.SetMetadata(t.Context(), getKey(i), getValue(i)))
 	}
 
-	for i := 0; i < n; i++ {
+	for i := range n {
 		value, err := s.GetMetadata(t.Context(), getKey(i))
 		require.NoError(err)
 		require.Equal(getValue(i), value)
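Note: the range operand may be any integer expression, as in numOpsPerGoroutine / 2 and itemsPerWriter * numWriters above, and it is evaluated once before the first iteration (the classic loop re-evaluated i < n*m every pass); gofmt also spaces the binary operators. Sketch:

    package main

    import "fmt"

    func main() {
    	numOps, numWriters := 10, 3
    	// the bound numOps*numWriters is computed once, up front
    	for i := range numOps * numWriters {
    		if i == 0 {
    			fmt.Println("total iterations:", numOps*numWriters)
    		}
    	}
    }
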
diff --git a/pkg/telemetry/testutil/attributes.go b/pkg/telemetry/testutil/attributes.go
index 6a6ede4cd0..90d34ca7ba 100644
--- a/pkg/telemetry/testutil/attributes.go
+++ b/pkg/telemetry/testutil/attributes.go
@@ -9,7 +9,7 @@ import (
 )
 
 // RequireAttribute asserts that an attribute with the given key exists and has the expected value.
-func RequireAttribute(t *testing.T, attrs []attribute.KeyValue, key string, expected interface{}) {
+func RequireAttribute(t *testing.T, attrs []attribute.KeyValue, key string, expected any) {
 	t.Helper()
 	found := false
 	for _, attr := range attrs {
diff --git a/tools/db-bench/main.go b/tools/db-bench/main.go
index ab07357c96..210aa8706c 100644
--- a/tools/db-bench/main.go
+++ b/tools/db-bench/main.go
@@ -123,10 +123,7 @@ func runProfile(p profile, dir string, cfg config) (result, error) {
 
 	overwriteEvery := 0
 	if cfg.overwriteRatio > 0 {
-		overwriteEvery = int(math.Round(1.0 / cfg.overwriteRatio))
-		if overwriteEvery < 1 {
-			overwriteEvery = 1
-		}
+		overwriteEvery = max(int(math.Round(1.0/cfg.overwriteRatio)), 1)
 	}
 
 	kv, err := p.open(dir)
@@ -145,7 +142,7 @@ func runProfile(p profile, dir string, cfg config) (result, error) {
 	pending := 0
 	keysWritten := 0
 
-	for i := 0; i < totalWrites; i++ {
+	for i := range totalWrites {
 		keyIndex := keysWritten
 		if overwriteEvery > 0 && i%overwriteEvery == 0 && keysWritten > 0 {
 			keyIndex = i % keysWritten
diff --git a/tools/local-da/local.go b/tools/local-da/local.go
index 82ccfc0493..c00ff16dcb 100644
--- a/tools/local-da/local.go
+++ b/tools/local-da/local.go
@@ -245,7 +245,7 @@ func (d *LocalDA) Validate(ctx context.Context, ids []datypes.ID, proofs []datyp
 		return nil, errors.New("number of IDs doesn't equal to number of proofs")
 	}
 	results := make([]bool, len(ids))
-	for i := 0; i < len(ids); i++ {
+	for i := range ids {
 		results[i] = ed25519.Verify(d.pubKey, ids[i][8:], proofs[i])
 		d.logger.Debug().Interface("id", ids[i]).Bool("result", results[i]).Msg("Validate result")
 	}
diff --git a/types/serialization_test.go b/types/serialization_test.go
index ce73188deb..2229d96d43 100644
--- a/types/serialization_test.go
+++ b/types/serialization_test.go
@@ -21,7 +21,7 @@ func TestBlockSerializationRoundTrip(t *testing.T) {
 
 	// create random hashes
 	h := []Hash{}
-	for i := 0; i < 8; i++ {
+	for range 8 {
 		h1 := make(Hash, 32)
 		n, err := rand.Read(h1[:])
 		require.Equal(32, n)
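Note: the last few files pick up the two remaining modernizations: any, an alias for interface{} since Go 1.18 (the identical type, purely a spelling change), and the max builtin from Go 1.21, which collapses db-bench's clamp-to-at-least-one branch into a single expression. A combined sketch:

    package main

    import (
    	"fmt"
    	"math"
    )

    // any and interface{} are the same type; only the spelling changes
    func describe(v any) {
    	fmt.Printf("%T: %v\n", v, v)
    }

    func main() {
    	describe(42)
    	describe("hello")

    	// max (Go 1.21 builtin) replaces the if overwriteEvery < 1 clamp
    	ratio := 0.4
    	overwriteEvery := max(int(math.Round(1.0/ratio)), 1)
    	fmt.Println(overwriteEvery) // 3, and never below 1
    }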