Skip to content

Commit

Permalink
lint up (part two)
Browse files Browse the repository at this point in the history
* enable gocritic ifElseChain

Signed-off-by: Alex Aizman <[email protected]>
  • Loading branch information
alex-aizman committed Jan 1, 2025
1 parent 30609cd commit abb6767
Show file tree
Hide file tree
Showing 22 changed files with 209 additions and 169 deletions.
3 changes: 1 addition & 2 deletions .golangci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -94,10 +94,9 @@ linters-settings:
- performance
- style
disabled-checks:
- ifElseChain ## TODO: enable
- unnamedResult
gosec:
excludes: ## integer overflow; week rand
excludes: ## integer overflow; weak rand
- G115
- G402
- G404
Expand Down
131 changes: 67 additions & 64 deletions ais/test/bucket_test.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
// Package integration_test.
/*
* Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2018-2025, NVIDIA CORPORATION. All rights reserved.
*/
package integration_test

Expand Down Expand Up @@ -882,17 +882,19 @@ func TestListObjectsStartAfter(t *testing.T) {

msg := &apc.LsoMsg{PageSize: 10, StartAfter: middleObjName}
lst, err = api.ListObjects(baseParams, m.bck, msg, api.ListArgs{})
if bck.IsAIS() {

switch {
case bck.IsAIS():
tassert.CheckFatal(t, err)
tassert.Errorf(
t, len(lst.Entries) == m.num/2,
"unexpected number of entries (got: %d, expected: %d)",
len(lst.Entries), m.num/2,
)
} else if err != nil {
case err != nil:
herr := cmn.Err2HTTPErr(err)
tlog.Logf("Error is expected here, got %q\n", herr)
} else {
default:
tassert.Errorf(t, false, "expected an error, got nil")
}
})
Expand Down Expand Up @@ -2428,12 +2430,13 @@ func TestCopyBucket(t *testing.T) {
}
}

if bckTest.IsAIS() {
switch {
case bckTest.IsAIS():
srcm.puts()

srcBckList, err = api.ListObjects(baseParams, srcm.bck, nil, api.ListArgs{})
tassert.CheckFatal(t, err)
} else if bckTest.IsRemote() {
case bckTest.IsRemote():
srcm.remotePuts(false /*evict*/)
srcBckList, err = api.ListObjects(baseParams, srcm.bck, nil, api.ListArgs{})
tassert.CheckFatal(t, err)
Expand All @@ -2447,8 +2450,8 @@ func TestCopyBucket(t *testing.T) {
tassert.CheckFatal(t, err)
}
defer srcm.del()
} else {
panic(bckTest)
default:
t.Fatal("invalid provider", bckTest.String())
}

xactIDs := make([]string, 0, len(dstms))
Expand Down Expand Up @@ -3202,60 +3205,59 @@ func testWarmValidation(t *testing.T, cksumType string, mirrored, eced bool) {
baseParams := tools.BaseAPIParams(m.proxyURL)
tools.CreateBucket(t, m.proxyURL, m.bck, nil, true /*cleanup*/)

{
if mirrored {
_, err := api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
Cksum: &cmn.CksumConfToSet{
Type: apc.Ptr(cksumType),
ValidateWarmGet: apc.Ptr(true),
},
Mirror: &cmn.MirrorConfToSet{
Enabled: apc.Ptr(true),
Copies: apc.Ptr[int64](copyCnt),
},
})
tassert.CheckFatal(t, err)
} else if eced {
if m.smap.CountActiveTs() < parityCnt+1 {
t.Fatalf("Not enough targets to run %s test, must be at least %d", t.Name(), parityCnt+1)
}
_, err := api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
Cksum: &cmn.CksumConfToSet{
Type: apc.Ptr(cksumType),
ValidateWarmGet: apc.Ptr(true),
},
EC: &cmn.ECConfToSet{
Enabled: apc.Ptr(true),
ObjSizeLimit: apc.Ptr[int64](cos.GiB), // only slices
DataSlices: apc.Ptr(1),
ParitySlices: apc.Ptr(parityCnt),
},
})
tassert.CheckFatal(t, err)
} else {
_, err := api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
Cksum: &cmn.CksumConfToSet{
Type: apc.Ptr(cksumType),
ValidateWarmGet: apc.Ptr(true),
},
})
tassert.CheckFatal(t, err)
}

p, err := api.HeadBucket(baseParams, m.bck, true /* don't add */)
switch {
case mirrored:
_, err := api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
Cksum: &cmn.CksumConfToSet{
Type: apc.Ptr(cksumType),
ValidateWarmGet: apc.Ptr(true),
},
Mirror: &cmn.MirrorConfToSet{
Enabled: apc.Ptr(true),
Copies: apc.Ptr[int64](copyCnt),
},
})
tassert.CheckFatal(t, err)
if p.Cksum.Type != cksumType {
t.Fatalf("failed to set checksum: %q != %q", p.Cksum.Type, cksumType)
}
if !p.Cksum.ValidateWarmGet {
t.Fatal("failed to set checksum: validate_warm_get not enabled")
}
if mirrored && !p.Mirror.Enabled {
t.Fatal("failed to mirroring")
}
if eced && !p.EC.Enabled {
t.Fatal("failed to enable erasure coding")
case eced:
if m.smap.CountActiveTs() < parityCnt+1 {
t.Fatalf("Not enough targets to run %s test, must be at least %d", t.Name(), parityCnt+1)
}
_, err := api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
Cksum: &cmn.CksumConfToSet{
Type: apc.Ptr(cksumType),
ValidateWarmGet: apc.Ptr(true),
},
EC: &cmn.ECConfToSet{
Enabled: apc.Ptr(true),
ObjSizeLimit: apc.Ptr[int64](cos.GiB), // only slices
DataSlices: apc.Ptr(1),
ParitySlices: apc.Ptr(parityCnt),
},
})
tassert.CheckFatal(t, err)
default:
_, err := api.SetBucketProps(baseParams, m.bck, &cmn.BpropsToSet{
Cksum: &cmn.CksumConfToSet{
Type: apc.Ptr(cksumType),
ValidateWarmGet: apc.Ptr(true),
},
})
tassert.CheckFatal(t, err)
}

p, err := api.HeadBucket(baseParams, m.bck, true /* don't add */)
tassert.CheckFatal(t, err)
if p.Cksum.Type != cksumType {
t.Fatalf("failed to set checksum: %q != %q", p.Cksum.Type, cksumType)
}
if !p.Cksum.ValidateWarmGet {
t.Fatal("failed to set checksum: validate_warm_get not enabled")
}
if mirrored && !p.Mirror.Enabled {
t.Fatal("failed to mirroring")
}
if eced && !p.EC.Enabled {
t.Fatal("failed to enable erasure coding")
}

m.puts()
Expand Down Expand Up @@ -3408,10 +3410,11 @@ func TestBucketListAndSummary(t *testing.T) {
m.initAndSaveState(true /*cleanup*/)
m.expectTargets(1)

if m.bck.IsAIS() {
switch {
case m.bck.IsAIS():
tools.CreateBucket(t, m.proxyURL, m.bck, nil, true /*cleanup*/)
m.puts()
} else if m.bck.IsRemote() {
case m.bck.IsRemote():
m.bck = cliBck
tools.CheckSkip(t, &tools.SkipTestArgs{RemoteBck: true, Bck: m.bck})
tlog.Logf("remote %s\n", m.bck.Cname(""))
Expand All @@ -3426,8 +3429,8 @@ func TestBucketListAndSummary(t *testing.T) {
m.remotePrefetch(cacheSize)
expectedFiles = cacheSize
}
} else {
t.Fatal(test.provider)
default:
t.Fatal("invalid provider", test.provider)
}

tlog.Logln("checking objects...")
Expand Down
10 changes: 6 additions & 4 deletions ais/test/common_test.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
// Package integration_test.
/*
* Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2018-2025, NVIDIA CORPORATION. All rights reserved.
*/
package integration_test

Expand Down Expand Up @@ -300,13 +300,15 @@ func (m *ioContext) _remoteFill(objCnt int, evict, override bool) {
tassert.CheckFatal(m.t, err)

var objName string
if override {
switch {
case override:
objName = m.objNames[i]
} else if m.ordered {
case m.ordered:
objName = fmt.Sprintf("%s%d", m.prefix, i)
} else {
default:
objName = fmt.Sprintf("%s%s-%d", m.prefix, trand.String(8), i)
}

wg.Add(1)
go func() {
defer wg.Done()
Expand Down
12 changes: 7 additions & 5 deletions ais/test/dsort_test.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
//nolint:dupl // copy-paste benign and can wait
// Package integration_test.
/*
* Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2018-2025, NVIDIA CORPORATION. All rights reserved.
*/
package integration_test

Expand Down Expand Up @@ -307,16 +307,18 @@ func (df *dsortFramework) createInputShards() {
} else {
tarName = path + df.inputExt
}
if df.alg.Kind == dsort.Content {

switch {
case df.alg.Kind == dsort.Content:
err = tarch.CreateArchCustomFiles(tarName, df.tarFormat, df.inputExt, df.filesPerShard,
df.fileSz, df.alg.ContentKeyType, df.alg.Ext, df.missingKeys)
} else if df.recordNames != nil {
case df.recordNames != nil:
err = tarch.CreateArchRandomFiles(tarName, df.tarFormat, df.inputExt, df.filesPerShard,
df.fileSz, duplication, true, df.recordExts, df.recordNames)
} else if df.inputExt == archive.ExtTar {
case df.inputExt == archive.ExtTar:
err = tarch.CreateArchRandomFiles(tarName, df.tarFormat, df.inputExt, df.filesPerShard,
df.fileSz, duplication, false, df.recordExts, nil)
} else {
default:
err = tarch.CreateArchRandomFiles(tarName, df.tarFormat, df.inputExt, df.filesPerShard,
df.fileSz, duplication, false, nil, nil)
}
Expand Down
17 changes: 10 additions & 7 deletions ais/test/ec_test.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
// Package integration_test.
/*
* Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2018-2025, NVIDIA CORPORATION. All rights reserved.
*/
package integration_test

Expand Down Expand Up @@ -179,12 +179,13 @@ func ecCheckSlices(t *testing.T, sliceList map[string]ecSliceMD,
ct, err := core.NewCTFromFQN(k, nil)
tassert.CheckFatal(t, err)

if ct.ContentType() == fs.ECMetaType {
switch {
case ct.ContentType() == fs.ECMetaType:
metaCnt++
tassert.Errorf(t, md.size <= 4*cos.KiB, "Metafile %q size is too big: %d", k, md.size)
} else if ct.ContentType() == fs.ECSliceType {
case ct.ContentType() == fs.ECSliceType:
tassert.Errorf(t, md.size == sliceSize, "Slice %q size mismatch: %d, expected %d", k, md.size, sliceSize)
} else {
default:
tassert.Errorf(t, ct.ContentType() == fs.ObjectType, "invalid content type %s, expected: %s", ct.ContentType(), fs.ObjectType)
tassert.Errorf(t, ct.Bck().Name == bck.Name, "invalid bucket name %s, expected: %s", ct.Bck().Name, bck.Name)
tassert.Errorf(t, ct.ObjectName() == objPath, "invalid object name %s, expected: %s", ct.ObjectName(), objPath)
Expand Down Expand Up @@ -356,18 +357,20 @@ func doECPutsAndCheck(t *testing.T, baseParams api.BaseParams, bck cmn.Bck, o *e
for k, md := range foundParts {
ct, err := core.NewCTFromFQN(k, nil)
tassert.CheckFatal(t, err)
if ct.ContentType() == fs.ECMetaType {

switch {
case ct.ContentType() == fs.ECMetaType:
metaCnt++
tassert.Errorf(t, md.size <= 512, "Metafile %q size is too big: %d", k, md.size)
} else if ct.ContentType() == fs.ECSliceType {
case ct.ContentType() == fs.ECSliceType:
sliceCnt++
if md.size != sliceSize && doEC {
t.Errorf("Slice %q size mismatch: %d, expected %d", k, md.size, sliceSize)
}
if md.size != objSize && !doEC {
t.Errorf("Copy %q size mismatch: %d, expected %d", k, md.size, objSize)
}
} else {
default:
tassert.Errorf(t, ct.ContentType() == fs.ObjectType, "invalid content type %s, expected: %s", ct.ContentType(), fs.ObjectType)
tassert.Errorf(t, ct.Bck().Provider == bck.Provider, "invalid provider %s, expected: %s", ct.Bck().Provider, apc.AIS)
tassert.Errorf(t, ct.Bck().Name == bck.Name, "invalid bucket name %s, expected: %s", ct.Bck().Name, bck.Name)
Expand Down
9 changes: 5 additions & 4 deletions ais/test/objprops_test.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
// Package integration_test.
/*
* Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2018-2025, NVIDIA CORPORATION. All rights reserved.
*/
package integration_test

Expand Down Expand Up @@ -606,11 +606,12 @@ func TestObjProps(t *testing.T) {
}

func testListObjects(t *testing.T, proxyURL string, bck cmn.Bck, msg *apc.LsoMsg) *cmn.LsoRes {
if msg == nil {
switch {
case msg == nil:
tlog.Logf("LIST %s []\n", bck)
} else if msg.Prefix == "" && msg.PageSize == 0 && msg.ContinuationToken == "" {
case msg.Prefix == "" && msg.PageSize == 0 && msg.ContinuationToken == "":
tlog.Logf("LIST %s [cached: %t]\n", bck, msg.IsFlagSet(apc.LsObjCached))
} else {
default:
tlog.Logf("LIST %s [prefix: %q, page_size: %d, cached: %t, token: %q]\n",
bck, msg.Prefix, msg.PageSize, msg.IsFlagSet(apc.LsObjCached), msg.ContinuationToken)
}
Expand Down
9 changes: 5 additions & 4 deletions ais/test/promote_test.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
// Package integration_test.
/*
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2022-2025, NVIDIA CORPORATION. All rights reserved.
*/
package integration_test

Expand Down Expand Up @@ -280,19 +280,20 @@ func (test *prmTests) wait(t *testing.T, xid, tempdir string, target *meta.Snode
}

// wait "cases" 1. through 3.
if xid != "" && !test.singleTarget { // 1. cluster-wide xaction
switch {
case xid != "" && !test.singleTarget: // 1. cluster-wide xaction
tlog.Logf("Waiting for global %s(%s=>%s)\n", xname, tempdir, m.bck)
notifStatus, err := api.WaitForXactionIC(baseParams, &xargs)
tassert.CheckFatal(t, err)
if notifStatus != nil && (notifStatus.AbortedX || notifStatus.ErrMsg != "") {
tlog.Logf("Warning: notif-status: %+v\n", notifStatus)
}
} else if xid != "" && test.singleTarget { // 2. single-target xaction
case xid != "" && test.singleTarget: // 2. single-target xaction
xargs.DaemonID = target.ID()
tlog.Logf("Waiting for %s(%s=>%s) at %s\n", xname, tempdir, m.bck, target.StringEx())
err := api.WaitForXactionNode(baseParams, &xargs, xactSnapNotRunning)
tassert.CheckFatal(t, err)
} else { // 3. synchronous execution
default: // 3. synchronous execution
tlog.Logf("Promoting without xaction (%s=>%s)\n", tempdir, m.bck)
}

Expand Down
9 changes: 5 additions & 4 deletions ais/test/s3_compat_test.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
// Package integration_test.
/*
* Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2018-2025, NVIDIA CORPORATION. All rights reserved.
*/
package integration_test

Expand Down Expand Up @@ -126,13 +126,14 @@ func setBucketFeatures(t *testing.T, bck cmn.Bck, bprops *cmn.Bprops, nf feat.Fl
}

func loadCredentials(t *testing.T) (f func(*config.LoadOptions) error) {
if os.Getenv("AWS_ACCESS_KEY_ID") != "" && os.Getenv("AWS_SECRET_ACCESS_KEY") != "" {
switch {
case os.Getenv("AWS_ACCESS_KEY_ID") != "" && os.Getenv("AWS_SECRET_ACCESS_KEY") != "":
f = config.WithCredentialsProvider(
credentials.NewStaticCredentialsProvider(os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY"), ""),
)
} else if os.Getenv("AWS_PROFILE") != "" {
case os.Getenv("AWS_PROFILE") != "":
f = config.WithSharedConfigProfile(os.Getenv("AWS_PROFILE"))
} else {
default:
t.Skip("Failed to load credentials, none of AWS_PROFILE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY are set")
f = func(*config.LoadOptions) error { return nil }
}
Expand Down
Loading

0 comments on commit abb6767

Please sign in to comment.