2 changes: 1 addition & 1 deletion allocator/allocator_test.go
@@ -441,7 +441,7 @@ type pendingResultWithChan struct {
}

type allocStep struct {
- op interface{}
+ op any
totals map[peer.ID]uint64
expectedPending []pendingResult
}
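
The interface{} → any substitutions throughout this PR are purely cosmetic: since Go 1.18, any is a built-in alias for interface{}, so the struct field above keeps exactly the same type and all call sites keep compiling. A minimal standalone sketch (not part of the diff, names are made up):

package main

import "fmt"

// describe accepts a value of any type; `any` and `interface{}` are the same type.
func describe(v any) string {
	return fmt.Sprintf("%T: %v", v, v)
}

func main() {
	var legacy interface{} = 42
	fmt.Println(describe(legacy)) // prints "int: 42" — no conversion needed
}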
8 changes: 4 additions & 4 deletions benchmarks/benchmark_test.go
@@ -101,7 +101,7 @@ func benchmarkRepeatedDisconnects(ctx context.Context, b *testing.B, numnodes in
require.NoError(b, err)
start := time.Now()
errgrp, grpctx := errgroup.WithContext(ctx)
- for j := 0; j < numnodes; j++ {
+ for j := range numnodes {
instance := instances[j+1]
_, errChan := fetcher.Exchange.Request(grpctx, instance.Peer, cidlink.Link{Cid: allCids[i][j]}, allSelector)
other := instance.Peer
@@ -152,7 +152,7 @@ func p2pStrestTest(ctx context.Context, b *testing.B, numfiles int, df distFunc,
instances, err := ig.Instances(1 + b.N)
require.NoError(b, err)
var allCids []cid.Cid
- for i := 0; i < numfiles; i++ {
+ for range numfiles {
thisCids := df(ctx, b, instances[:1])
allCids = append(allCids, thisCids...)
}
@@ -170,7 +170,7 @@ func p2pStrestTest(ctx context.Context, b *testing.B, numfiles int, df distFunc,
require.NoError(b, err)
start := time.Now()
errgrp, grpctx := errgroup.WithContext(ctx)
- for j := 0; j < numfiles; j++ {
+ for j := range numfiles {
responseChan, errChan := fetcher.Exchange.Request(grpctx, instances[0].Peer, cidlink.Link{Cid: allCids[j]}, allSelector)
errgrp.Go(func() error {
for range responseChan {
@@ -223,7 +223,7 @@ func subtestDistributeAndFetch(ctx context.Context, b *testing.B, numnodes int,
require.NoError(b, err)
start := time.Now()
errgrp, grpctx := errgroup.WithContext(ctx)
- for j := 0; j < numnodes; j++ {
+ for j := range numnodes {
instance := instances[j]
_, errChan := fetcher.Exchange.Request(grpctx, instance.Peer, cidlink.Link{Cid: destCids[j]}, allSelector)

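The for j := 0; j < n; j++ → for j := range n rewrites above use Go 1.22's range-over-integer form, which yields 0 through n-1; when the index is unused, for range n simply repeats the body n times. An illustrative sketch, not taken from this repo:

package main

import "fmt"

func main() {
	// Equivalent to: for j := 0; j < 3; j++
	for j := range 3 {
		fmt.Println("node", j)
	}

	// Index not needed: just run the body twice.
	for range 2 {
		fmt.Println("ping")
	}
}
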
2 changes: 1 addition & 1 deletion benchmarks/testinstance/testinstance.go
@@ -73,7 +73,7 @@ func (g *InstanceGenerator) Next() (Instance, error) {
// them to each other
func (g *InstanceGenerator) Instances(n int) ([]Instance, error) {
var instances []Instance
- for j := 0; j < n; j++ {
+ for range n {
inst, err := g.Next()
if err != nil {
return nil, err
@@ -36,7 +36,7 @@ func TestInternetLatencyDelayNextWaitTimeDistribution(t *testing.T) {

// strategy here is rather than mock randomness, just use enough samples to
// get approximately the distribution you'd expect
- for i := 0; i < 10000; i++ {
+ for range 10000 {
next := internetLatencyDistributionDelay.NextWaitTime(initialValue)
if math.Abs((next - initialValue).Seconds()) <= deviation.Seconds() {
buckets["fast"]++
Expand Down
8 changes: 2 additions & 6 deletions benchmarks/testnet/peernet.go
@@ -2,6 +2,7 @@ package testnet

import (
"context"
"slices"

tnet "github.com/libp2p/go-libp2p-testing/net"
"github.com/libp2p/go-libp2p/core/peer"
@@ -32,12 +33,7 @@ func (pn *peernet) Adapter(p tnet.Identity) gsnet.GraphSyncNetwork {
}

func (pn *peernet) HasPeer(p peer.ID) bool {
- for _, member := range pn.Mocknet.Peers() {
- if p == member {
- return true
- }
- }
- return false
+ return slices.Contains(pn.Mocknet.Peers(), p)
}

var _ Network = (*peernet)(nil)
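
Replacing the hand-rolled membership loop in HasPeer with slices.Contains (standard library since Go 1.21) preserves the semantics: a linear scan comparing elements with ==, which works for comparable types such as peer.ID. A rough standalone equivalent using a plain string slice in place of peer IDs:

package main

import (
	"fmt"
	"slices"
)

// hasPeer mirrors the before/after shape of HasPeer, with strings standing in for peer IDs.
func hasPeer(peers []string, p string) bool {
	return slices.Contains(peers, p) // same behavior as the removed for/if loop
}

func main() {
	fmt.Println(hasPeer([]string{"alice", "bob"}, "bob"))   // true
	fmt.Println(hasPeer([]string{"alice", "bob"}, "carol")) // false
}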
7 changes: 3 additions & 4 deletions impl/graphsync_test.go
@@ -168,7 +168,7 @@ func TestGraphsyncRoundTripRequestBudgets(t *testing.T) {
}

for _, tc := range testCases {
- tc := tc
t.Run(tc.name, func(t *testing.T) {
// create network
ctx := context.Background()
@@ -439,7 +438,7 @@ func TestGraphsyncRoundTripHooksOrder(t *testing.T) {
require.Len(t, td.blockStore1, blockChainLength, "did not store all blocks")

var calledHooks []string
- for i := 0; i < 5; i++ {
+ for range 5 {
select {
case <-ctx.Done():
t.Fatal("did not receive all events")
@@ -1970,8 +1969,8 @@ func TestPanicHandlingInTraversal(t *testing.T) {
}

// initialize graphsync on first node to make requests and set a panic callback
- var panicObj interface{}
- requestor := td.GraphSyncHost1(PanicCallback(func(recoverObj interface{}, debugStackTrace string) {
+ var panicObj any
+ requestor := td.GraphSyncHost1(PanicCallback(func(recoverObj any, debugStackTrace string) {
panicObj = recoverObj
}))

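Deleting tc := tc is safe because Go 1.22 gives each loop iteration its own copy of the loop variable, so closures created inside the loop (such as parallel t.Run subtests) no longer observe a single shared tc. A standalone sketch of the behavior this relies on, assuming Go 1.22+:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, name := range []string{"a", "b", "c"} {
		wg.Add(1)
		// Before Go 1.22 this needed `name := name`; now each iteration
		// has a fresh `name`, so every goroutine sees its own value.
		go func() {
			defer wg.Done()
			fmt.Println(name)
		}()
	}
	wg.Wait() // prints a, b and c (in some order), not the last value three times
}
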
4 changes: 2 additions & 2 deletions ipldutil/traverser_test.go
@@ -115,7 +115,7 @@ func TestTraverser(t *testing.T) {
},
}.Start(ctx)
var path ipld.Path
- for i := 0; i < 6; i++ {
+ for range 6 {
path = path.AppendSegment(ipld.PathSegmentOfString("Parents"))
path = path.AppendSegment(ipld.PathSegmentOfInt(0))
}
@@ -155,7 +155,7 @@ func TestTraverser(t *testing.T) {

var err error
// To ensure the state isn't broken, do multiple calls.
- for i := 0; i < 3; i++ {
+ for range 3 {
err = traverser.Advance(bytes.NewBuffer(nil))
require.Error(t, err)

Expand Down
13 changes: 4 additions & 9 deletions message/message.go
@@ -3,6 +3,7 @@ package message
import (
"fmt"
"io"
"maps"
"strings"

blocks "github.com/ipfs/go-block-format"
@@ -256,17 +257,11 @@ func (gsm GraphSyncMessage) Blocks() []blocks.Block {
// Clone returns a shallow copy of this GraphSyncMessage
func (gsm GraphSyncMessage) Clone() GraphSyncMessage {
requests := make(map[graphsync.RequestID]GraphSyncRequest, len(gsm.requests))
- for id, request := range gsm.requests {
- requests[id] = request
- }
+ maps.Copy(requests, gsm.requests)
responses := make(map[graphsync.RequestID]GraphSyncResponse, len(gsm.responses))
- for id, response := range gsm.responses {
- responses[id] = response
- }
+ maps.Copy(responses, gsm.responses)
blocks := make(map[cid.Cid]blocks.Block, len(gsm.blocks))
- for cid, block := range gsm.blocks {
- blocks[cid] = block
- }
+ maps.Copy(blocks, gsm.blocks)
return GraphSyncMessage{requests, responses, blocks}
}

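The three copy loops in Clone collapse into maps.Copy (standard library since Go 1.21), which inserts every key/value pair from the source map into the destination — the same shallow copy as before. A minimal sketch with placeholder types:

package main

import (
	"fmt"
	"maps"
)

func main() {
	src := map[string]int{"a": 1, "b": 2}

	// Pre-size the destination, then copy all entries, matching the removed loop.
	dst := make(map[string]int, len(src))
	maps.Copy(dst, src)

	dst["a"] = 99
	fmt.Println(src["a"], dst["a"]) // 1 99 — the maps are independent
}
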
8 changes: 2 additions & 6 deletions message/v2/message_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"errors"
"math/rand"
"slices"
"testing"

blocks "github.com/ipfs/go-block-format"
@@ -146,12 +147,7 @@ func TestAppendBlock(t *testing.T) {
}

func contains(strs []string, x string) bool {
- for _, s := range strs {
- if s == x {
- return true
- }
- }
- return false
+ return slices.Contains(strs, x)
}

func TestRequestCancel(t *testing.T) {
2 changes: 1 addition & 1 deletion network/libp2p_impl_test.go
@@ -124,7 +124,7 @@ func TestMessageSendAndReceive(t *testing.T) {
require.True(t, found)
require.Equal(t, extension.Data, extensionData)

- for i := 0; i < 2; i++ {
+ for range 2 {
testutil.AssertDoesReceive(ctx, t, r.connectedPeers, "peers were not notified")
}

6 changes: 3 additions & 3 deletions notifications/types.go
@@ -1,13 +1,13 @@
package notifications

// Topic is a topic that events appear on
- type Topic interface{}
+ type Topic any

// Event is a publishable event
- type Event interface{}
+ type Event any

// TopicData is data added to every message broadcast on a topic
- type TopicData interface{}
+ type TopicData any

// Subscriber is a subscriber that can receive events
type Subscriber interface {
8 changes: 4 additions & 4 deletions panics/panics.go
@@ -6,15 +6,15 @@ import (
)

// CallBackFn is a function that will get called with information about the panic
- type CallBackFn func(recoverObj interface{}, debugStackTrace string)
+ type CallBackFn func(recoverObj any, debugStackTrace string)

// PanicHandler is a function that can be called with the result of revover() within a deferred
// to recover from panics and pass them to a callback it returns an error if a recovery was needed
- type PanicHandler func(interface{}) error
+ type PanicHandler func(any) error

// MakeHandler makes a handler that recovers from panics and passes them to the given callback
func MakeHandler(cb CallBackFn) PanicHandler {
- return func(obj interface{}) error {
+ return func(obj any) error {
if obj == nil {
return nil
}
@@ -33,7 +33,7 @@ func MakeHandler(cb CallBackFn) PanicHandler {
// The assumption is we want to make sure all of graphsync doesn't go down cause a single block load
// or selector execution fails
type RecoveredPanicErr struct {
- PanicObj interface{}
+ PanicObj any
DebugStackTrace string
}

2 changes: 1 addition & 1 deletion peermanager/peermanager.go
@@ -13,7 +13,7 @@ type PeerProcess interface {
Shutdown()
}

- type PeerHandler interface{}
+ type PeerHandler any

// PeerProcessFactory provides a function that will create a PeerQueue.
type PeerProcessFactory func(ctx context.Context, p peer.ID, onShutdown func(peer.ID)) PeerHandler
2 changes: 1 addition & 1 deletion peerstate/peerstate_test.go
@@ -12,7 +12,7 @@ import (

func TestDiagnostics(t *testing.T) {
requestIDs := make([]graphsync.RequestID, 0, 5)
- for i := 0; i < 5; i++ {
+ for range 5 {
requestIDs = append(requestIDs, graphsync.NewRequestID())
}
testCases := map[string]struct {
5 changes: 1 addition & 4 deletions requestmanager/executor/executor.go
@@ -212,10 +212,7 @@ func (e *Executor) processResult(rt RequestTask, link datamodel.Link, result typ

func (e *Executor) startRemoteRequest(rt RequestTask) error {
request := rt.Request
- doNotSendFirstBlocks := rt.DoNotSendFirstBlocks
- if doNotSendFirstBlocks < int64(rt.Traverser.NBlocksTraversed()) {
- doNotSendFirstBlocks = int64(rt.Traverser.NBlocksTraversed())
- }
+ doNotSendFirstBlocks := max(rt.DoNotSendFirstBlocks, int64(rt.Traverser.NBlocksTraversed()))
if doNotSendFirstBlocks > 0 {
doNotSendFirstBlocksData := donotsendfirstblocks.EncodeDoNotSendFirstBlocks(doNotSendFirstBlocks)
request = rt.Request.ReplaceExtensions([]graphsync.ExtensionData{{Name: graphsync.ExtensionsDoNotSendFirstBlocks, Data: doNotSendFirstBlocksData}})
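The clamp in startRemoteRequest now uses the built-in max (Go 1.21), which returns the larger of its arguments; converting the traversed-block count to int64 at the call site keeps the comparison identical to the removed if statement. An illustrative sketch with made-up values:

package main

import "fmt"

func main() {
	doNotSendFirstBlocks := int64(5)
	nBlocksTraversed := 9

	// Equivalent to: if doNotSendFirstBlocks < int64(nBlocksTraversed) { doNotSendFirstBlocks = ... }
	skip := max(doNotSendFirstBlocks, int64(nBlocksTraversed))
	fmt.Println(skip) // 9
}
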
2 changes: 1 addition & 1 deletion requestmanager/reconciledloader/remotequeue.go
@@ -10,7 +10,7 @@ import (
)

var linkedRemoteItemPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return new(remotedLinkedItem)
},
}
2 changes: 1 addition & 1 deletion requestmanager/requestmanager_test.go
@@ -1046,7 +1046,7 @@ func (fph *fakePeerHandler) AllocateAndBuildMessage(p peer.ID, blkSize uint64,

func readNNetworkRequests(ctx context.Context, t *testing.T, td *testData, count int) []requestRecord {
requestRecords := make(map[graphsync.RequestID]requestRecord, count)
- for i := 0; i < count; i++ {
+ for i := range count {
var rr requestRecord
testutil.AssertReceive(ctx, t, td.requestRecordChan, &rr, fmt.Sprintf("did not receive request %d", i))
requestRecords[rr.gsr.ID()] = rr
2 changes: 1 addition & 1 deletion requestmanager/responsecollector_test.go
@@ -54,7 +54,7 @@ func TestBufferingResponseProgress(t *testing.T) {
require.Equal(t, block.Cid(), testResponse.LastBlock.Link.(cidlink.Link).Cid, "did not store block correctly")
}

- for i := 0; i < 2; i++ {
+ for i := range 2 {
var testErr error
testutil.AssertReceive(ctx, t, outgoingErrors, &testErr, "should have read from channel but couldn't")
if i == 0 {
2 changes: 1 addition & 1 deletion responsemanager/queryexecutor/queryexecutor_test.go
@@ -298,7 +298,7 @@ func newTestData(t *testing.T, blockCount int, expectedTraverse int) (*testData,

td.expectedBlocks = make([]*blockData, 0)
links := make([]ipld.Link, 0)
- for i := 0; i < blockCount; i++ {
+ for i := range blockCount {
td.expectedBlocks = append(td.expectedBlocks, newRandomBlock(int64(i)))
links = append(links, td.expectedBlocks[i].link)
}
8 changes: 4 additions & 4 deletions responsemanager/responsemanager_test.go
@@ -86,7 +86,7 @@ func TestIncomingQuery(t *testing.T) {
responseManager.Startup()

responseManager.ProcessRequests(td.ctx, td.p, td.requests)
- for i := 0; i < len(blks); i++ {
+ for range blks {
td.assertSendBlock()
}
td.assertCompleteRequestWith(graphsync.RequestCompletedFull)
@@ -1379,7 +1379,7 @@ func (td *testData) assertReceiveExtensionResponse() {
}

func (td *testData) verifyNResponsesOnlyProcessing(blockCount int) {
- for i := 0; i < blockCount; i++ {
+ for range blockCount {
testutil.AssertDoesReceive(td.ctx, td.t, td.sentResponses, "should sent block")
}
testutil.AssertChannelEmpty(td.t, td.sentResponses, "should not send more blocks")
@@ -1388,7 +1388,7 @@ func (td *testData) verifyNResponsesOnlyProcessing(blockCount int) {
func (td *testData) verifyNResponses(blockCount int) {
td.verifyNResponsesOnlyProcessing(blockCount)
td.notifyBlockSendsSent()
- for i := 0; i < blockCount; i++ {
+ for range blockCount {
testutil.AssertDoesReceive(td.ctx, td.t, td.blockSends, "should sent block")
}
testutil.AssertChannelEmpty(td.t, td.blockSends, "should not send more blocks")
@@ -1460,7 +1460,7 @@ func (td *testData) assertNoCompletedResponseStatuses() {
}

func (td *testData) assertNetworkErrors(err error, count int) {
- for i := 0; i < count; i++ {
+ for range count {
td.assertHasNetworkErrors(err)
}
testutil.AssertChannelEmpty(td.t, td.networkErrorChan, "should not send more blocks")
2 changes: 1 addition & 1 deletion taskqueue/taskqueue.go
@@ -93,7 +93,7 @@ func (tq *WorkerTaskQueue) WithPeerTopics(p peer.ID, withPeerTopics func(*peertr

// Startup runs the given number of task workers with the given executor
func (tq *WorkerTaskQueue) Startup(workerCount uint64, executor Executor) {
- for i := uint64(0); i < workerCount; i++ {
+ for range workerCount {
go tq.worker(executor)
}
}