From 8aedb947782bede310946f3387c596183357d592 Mon Sep 17 00:00:00 2001 From: Matthis Holleville Date: Mon, 13 Nov 2023 19:11:45 +0100 Subject: [PATCH] feat: Completion of cache pkg rework. Added cache purge command. Signed-off-by: Matthis Holleville --- README.md | 15 ++++++-- cmd/cache/add.go | 8 +++-- cmd/cache/purge.go | 54 ++++++++++++++++++++++++++++ pkg/cache/azuresa_based.go | 20 ++++++++--- pkg/cache/cache.go | 72 ++++++++++++++++++++++---------------- pkg/cache/file_based.go | 21 +++++++++-- pkg/cache/gcs_based.go | 37 ++++++++++++-------- pkg/cache/s3_based.go | 21 +++++++++-- pkg/server/config.go | 2 +- 9 files changed, 191 insertions(+), 59 deletions(-) create mode 100644 cmd/cache/purge.go diff --git a/README.md b/README.md index 8bb7cd5c02..645fbb49e7 100644 --- a/README.md +++ b/README.md @@ -595,19 +595,28 @@ _Adding a remote cache_ * AWS S3 * _As a prerequisite `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` are required as environmental variables._ - * Configuration, ``` k8sgpt cache add --region --bucket ``` + * Configuration, ``` k8sgpt cache add s3 --region --bucket ``` * K8sGPT will create the bucket if it does not exist * Azure Storage * We support a number of [techniques](https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication?tabs=bash#2-authenticate-with-azure) to authenticate against Azure - * Configuration, ``` k8sgpt cache add --storageacc --container ``` + * Configuration, ``` k8sgpt cache add azure --storageacc --container ``` * K8sGPT assumes that the storage account already exist and it will create the container if it does not exist - * It's **users'** responsibility have to grant specific permissions to their identity in order to be able to upload blob files and create SA containers (e.g Storage Blob Data Contributor) + * It's **users'** responsibility have to grant specific permissions to their identity in order to be able to upload blob files and create SA containers (e.g Storage Blob Data Contributor) + * 
Google Cloud Storage + * _As a prerequisite `GOOGLE_APPLICATION_CREDENTIALS` are required as environmental variables._ + * Configuration, ``` k8sgpt cache add gcs --region --bucket --projectid ``` + * K8sGPT will create the bucket if it does not exist _Listing cache items_ ``` k8sgpt cache list ``` +_Purging an object from the cache_ +``` +k8sgpt cache purge $OBJECT_NAME +``` + _Removing the remote cache_ Note: this will not delete the upstream S3 bucket or Azure storage container ``` diff --git a/cmd/cache/add.go b/cmd/cache/add.go index c720b9b6f8..a78b4db989 100644 --- a/cmd/cache/add.go +++ b/cmd/cache/add.go @@ -33,7 +33,7 @@ var ( // addCmd represents the add command var addCmd = &cobra.Command{ - Use: "add", + Use: "add [cache type]", Short: "Add a remote cache", Long: `This command allows you to add a remote cache to store the results of an analysis. The supported cache types are: @@ -41,9 +41,13 @@ var addCmd = &cobra.Command{ - Google Cloud storage - S3`, Run: func(cmd *cobra.Command, args []string) { + if len(args) == 0 { + color.Red("Error: Please provide a value for cache types. Run k8sgpt cache add --help") + os.Exit(1) + } fmt.Println(color.YellowString("Adding remote based cache")) cacheType := args[0] - remoteCache, err := cache.NewCacheProvider(cacheType, bucketname, region, storageAccount, containerName, projectId, false) + remoteCache, err := cache.NewCacheProvider(cacheType, bucketname, region, storageAccount, containerName, projectId) if err != nil { color.Red("Error: %v", err) os.Exit(1) diff --git a/cmd/cache/purge.go b/cmd/cache/purge.go new file mode 100644 index 0000000000..332f2967c6 --- /dev/null +++ b/cmd/cache/purge.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The K8sGPT Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package cache + +import ( + "fmt" + "os" + + "github.com/fatih/color" + "github.com/k8sgpt-ai/k8sgpt/pkg/cache" + "github.com/spf13/cobra" +) + +var purgeCmd = &cobra.Command{ + Use: "purge [object name]", + Short: "Purge a remote cache", + Long: "This command allows you to delete/purge one object from the cache", + Run: func(cmd *cobra.Command, args []string) { + if len(args) == 0 { + color.Red("Error: Please provide a value for object name. Run k8sgpt cache purge --help") + os.Exit(1) + } + objectKey := args[0] + fmt.Println(color.YellowString("Purging a remote cache.")) + c, err := cache.GetCacheConfiguration(false) + if err != nil { + color.Red("Error: %v", err) + os.Exit(1) + } + + err = c.Remove(objectKey) + if err != nil { + color.Red("Error: %v", err) + os.Exit(1) + } + fmt.Println(color.GreenString("Object deleted.")) + }, +} + +func init() { + CacheCmd.AddCommand(purgeCmd) +} diff --git a/pkg/cache/azuresa_based.go b/pkg/cache/azuresa_based.go index 3c4cc3d6be..5c929b863a 100644 --- a/pkg/cache/azuresa_based.go +++ b/pkg/cache/azuresa_based.go @@ -9,6 +9,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" ) // Generate ICache implementation @@ -24,8 +25,8 @@ type AzureCacheConfiguration struct { ContainerName string `mapstructure:"container" yaml:"container,omitempty"` } -func (s *AzureCache) Configure(cacheInfo CacheProvider, noCache bool) error { - ctx := context.Background() +func (s *AzureCache) Configure(cacheInfo 
CacheProvider) error { + s.ctx = context.Background() if cacheInfo.Azure.ContainerName == "" { log.Fatal("Azure Container name not configured") } @@ -44,7 +45,7 @@ func (s *AzureCache) Configure(cacheInfo CacheProvider, noCache bool) error { log.Fatal(err) } // Try to create the blob container - _, err = client.CreateContainer(ctx, cacheInfo.Azure.ContainerName, nil) + _, err = client.CreateContainer(s.ctx, cacheInfo.Azure.ContainerName, nil) if err != nil { // TODO: Maybe there is a better way to check this? // docs: https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob @@ -56,7 +57,6 @@ func (s *AzureCache) Configure(cacheInfo CacheProvider, noCache bool) error { } s.containerName = cacheInfo.Azure.ContainerName s.session = client - s.noCache = noCache return nil @@ -112,6 +112,14 @@ func (s *AzureCache) List() ([]CacheObjectDetails, error) { return files, nil } +func (s *AzureCache) Remove(key string) error { + _, err := s.session.DeleteBlob(s.ctx, s.containerName, key, &blob.DeleteOptions{}) + if err != nil { + return err + } + return nil +} + func (s *AzureCache) Exists(key string) bool { // Check if the object exists in the blob storage pager := s.session.NewListBlobsFlatPager(s.containerName, &azblob.ListBlobsFlatOptions{ @@ -141,3 +149,7 @@ func (s *AzureCache) IsCacheDisabled() bool { func (s *AzureCache) GetName() string { return "azure" } + +func (s *AzureCache) DisableCache() { + s.noCache = true +} diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index 800c901f50..a3d91b8246 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -1,6 +1,7 @@ package cache import ( + "errors" "fmt" "reflect" @@ -19,16 +20,18 @@ var ( ) type ICache interface { - Configure(cacheInfo CacheProvider, noCache bool) error + Configure(cacheInfo CacheProvider) error Store(key string, data string) error Load(key string) (string, error) List() ([]CacheObjectDetails, error) + Remove(key string) error Exists(key string) bool IsCacheDisabled() bool GetName() 
string + DisableCache() } -func New(noCache bool, cacheType string) ICache { +func New(cacheType string) ICache { for _, t := range types { if cacheType == t.GetName() { return t @@ -37,8 +40,6 @@ func New(noCache bool, cacheType string) ICache { return &FileBasedCache{} } -// CacheProvider is the configuration for the cache provider when using a remote cache - func ParseCacheConfiguration() (CacheProvider, error) { var cacheInfo CacheProvider err := viper.UnmarshalKey("cache", &cacheInfo) @@ -48,8 +49,7 @@ func ParseCacheConfiguration() (CacheProvider, error) { return cacheInfo, nil } -func NewCacheProvider(cacheType, bucketname, region, storageAccount, containerName, projectId string, noCache bool) (CacheProvider, error) { - cache := New(false, cacheType) +func NewCacheProvider(cacheType, bucketname, region, storageAccount, containerName, projectId string) (CacheProvider, error) { cProvider := CacheProvider{} switch { @@ -64,25 +64,26 @@ func NewCacheProvider(cacheType, bucketname, region, storageAccount, containerNa cProvider.S3.BucketName = bucketname cProvider.S3.Region = region default: - return CacheProvider{}, status.Error(codes.Internal, fmt.Sprintf("%s is not a possible option", cacheType)) + return CacheProvider{}, status.Error(codes.Internal, fmt.Sprintf("%s is not a valid option", cacheType)) } - err := cache.Configure(cProvider, noCache) + cache := New(cacheType) + err := cache.Configure(cProvider) if err != nil { return CacheProvider{}, err } return cProvider, nil } -// If we have set a remote cache, return the remote cache type +// If we have set a remote cache, return the remote cache configuration func GetCacheConfiguration(noCache bool) (ICache, error) { - // load remote cache if it is configured - var cache ICache cacheInfo, err := ParseCacheConfiguration() if err != nil { return nil, err } + var cache ICache + switch { case !reflect.DeepEqual(cacheInfo.GCS, GCSCacheConfiguration{}): cache = &GCSCache{} @@ -94,15 +95,30 @@ func 
GetCacheConfiguration(noCache bool) (ICache, error) { cache = &FileBasedCache{} } - cache.Configure(cacheInfo, noCache) + cache.Configure(cacheInfo) return cache, nil } +func HasAnyConfiguration(cacheInfo CacheProvider) bool { + return !reflect.DeepEqual(cacheInfo.GCS, GCSCacheConfiguration{}) || + !reflect.DeepEqual(cacheInfo.Azure, AzureCacheConfiguration{}) || + !reflect.DeepEqual(cacheInfo.S3, S3CacheConfiguration{}) +} + func AddRemoteCache(cacheInfo CacheProvider) error { + actualConfig, err := ParseCacheConfiguration() + if err != nil { + return err + } + + if HasAnyConfiguration(actualConfig) { + return errors.New("cache configuration already exists, please use the update method") + } + viper.Set("cache", cacheInfo) - err := viper.WriteConfig() + err = viper.WriteConfig() if err != nil { return err } @@ -110,23 +126,19 @@ func AddRemoteCache(cacheInfo CacheProvider) error { } func RemoveRemoteCache() error { + var cacheInfo CacheProvider + err := viper.UnmarshalKey("cache", &cacheInfo) + if err != nil { + return status.Error(codes.Internal, "cache unmarshal") + } + + cacheInfo = CacheProvider{} + viper.Set("cache", cacheInfo) + err = viper.WriteConfig() + if err != nil { + return status.Error(codes.Internal, "unable to write config") + } + return nil - // var cacheInfo CacheProvider - // err := viper.UnmarshalKey("cache", &cacheInfo) - // if err != nil { - // return status.Error(codes.Internal, "cache unmarshal") - // } - // if cacheInfo.BucketName == "" && cacheInfo.ContainerName == "" && cacheInfo.StorageAccount == "" { - // return status.Error(codes.Internal, "no remote cache configured") - // } - - // cacheInfo = CacheProvider{} - // viper.Set("cache", cacheInfo) - // err = viper.WriteConfig() - // if err != nil { - // return status.Error(codes.Internal, "unable to write config") - // } - - // return nil } diff --git a/pkg/cache/file_based.go b/pkg/cache/file_based.go index 4096dc6bfa..66926a7e85 100644 --- a/pkg/cache/file_based.go +++ 
b/pkg/cache/file_based.go @@ -15,8 +15,7 @@ type FileBasedCache struct { noCache bool } -func (f *FileBasedCache) Configure(cacheInfo CacheProvider, noCache bool) error { - f.noCache = noCache +func (f *FileBasedCache) Configure(cacheInfo CacheProvider) error { return nil } @@ -84,6 +83,20 @@ func (*FileBasedCache) Load(key string) (string, error) { return string(data), nil } +func (*FileBasedCache) Remove(key string) error { + path, err := xdg.CacheFile(filepath.Join("k8sgpt", key)) + + if err != nil { + return err + } + + if err := os.Remove(path); err != nil { + return err + } + + return nil +} + func (*FileBasedCache) Store(key string, data string) error { path, err := xdg.CacheFile(filepath.Join("k8sgpt", key)) @@ -97,3 +110,7 @@ func (*FileBasedCache) Store(key string, data string) error { func (s *FileBasedCache) GetName() string { return "file" } + +func (s *FileBasedCache) DisableCache() { + s.noCache = true +} diff --git a/pkg/cache/gcs_based.go b/pkg/cache/gcs_based.go index 6a7a5466fe..093b5fbdf8 100644 --- a/pkg/cache/gcs_based.go +++ b/pkg/cache/gcs_based.go @@ -10,6 +10,7 @@ import ( ) type GCSCache struct { + ctx context.Context noCache bool bucketName string projectId string @@ -23,7 +24,8 @@ type GCSCacheConfiguration struct { BucketName string `mapstructure:"bucketname" yaml:"bucketname,omitempty"` } -func (s *GCSCache) Configure(cacheInfo CacheProvider, noCache bool) error { +func (s *GCSCache) Configure(cacheInfo CacheProvider) error { + s.ctx = context.Background() if cacheInfo.GCS.BucketName == "" { log.Fatal("Bucket name not configured") } @@ -36,16 +38,14 @@ func (s *GCSCache) Configure(cacheInfo CacheProvider, noCache bool) error { s.bucketName = cacheInfo.GCS.BucketName s.projectId = cacheInfo.GCS.ProjectId s.region = cacheInfo.GCS.Region - s.noCache = noCache - ctx := context.Background() - storageClient, err := storage.NewClient(ctx) + storageClient, err := storage.NewClient(s.ctx) if err != nil { log.Fatal(err) } - _, err = 
storageClient.Bucket(s.bucketName).Attrs(ctx) + _, err = storageClient.Bucket(s.bucketName).Attrs(s.ctx) if err == storage.ErrBucketNotExist { - storageClient.Bucket(s.bucketName).Create(ctx, s.projectId, &storage.BucketAttrs{ + storageClient.Bucket(s.bucketName).Create(s.ctx, s.projectId, &storage.BucketAttrs{ Location: s.region, }) } @@ -54,8 +54,7 @@ func (s *GCSCache) Configure(cacheInfo CacheProvider, noCache bool) error { } func (s *GCSCache) Store(key string, data string) error { - ctx := context.Background() - wc := s.session.Bucket(s.bucketName).Object(key).NewWriter(ctx) + wc := s.session.Bucket(s.bucketName).Object(key).NewWriter(s.ctx) if _, err := wc.Write([]byte(data)); err != nil { return err @@ -69,8 +68,7 @@ func (s *GCSCache) Store(key string, data string) error { } func (s *GCSCache) Load(key string) (string, error) { - ctx := context.Background() - reader, err := s.session.Bucket(s.bucketName).Object(key).NewReader(ctx) + reader, err := s.session.Bucket(s.bucketName).Object(key).NewReader(s.ctx) if err != nil { return "", err } @@ -84,11 +82,19 @@ func (s *GCSCache) Load(key string) (string, error) { return string(data), nil } +func (s *GCSCache) Remove(key string) error { + bucketClient := s.session.Bucket(s.bucketName) + obj := bucketClient.Object(key) + if err := obj.Delete(s.ctx); err != nil { + return err + } + return nil +} + func (s *GCSCache) List() ([]CacheObjectDetails, error) { - ctx := context.Background() var files []CacheObjectDetails - items := s.session.Bucket(s.bucketName).Objects(ctx, nil) + items := s.session.Bucket(s.bucketName).Objects(s.ctx, nil) for { attrs, err := items.Next() if err == iterator.Done { @@ -106,9 +112,8 @@ func (s *GCSCache) List() ([]CacheObjectDetails, error) { } func (s *GCSCache) Exists(key string) bool { - ctx := context.Background() obj := s.session.Bucket(s.bucketName).Object(key) - _, err := obj.Attrs(ctx) + _, err := obj.Attrs(s.ctx) return err == nil } @@ -119,3 +124,7 @@ func (s *GCSCache) 
IsCacheDisabled() bool { func (s *GCSCache) GetName() string { return "gcs" } + +func (s *GCSCache) DisableCache() { + s.noCache = true +} diff --git a/pkg/cache/s3_based.go b/pkg/cache/s3_based.go index 9211401d14..e0ed641fd2 100644 --- a/pkg/cache/s3_based.go +++ b/pkg/cache/s3_based.go @@ -21,7 +21,7 @@ type S3CacheConfiguration struct { BucketName string `mapstructure:"bucketname" yaml:"bucketname,omitempty"` } -func (s *S3Cache) Configure(cacheInfo CacheProvider, noCache bool) error { +func (s *S3Cache) Configure(cacheInfo CacheProvider) error { if cacheInfo.S3.BucketName == "" { log.Fatal("Bucket name not configured") } @@ -49,7 +49,6 @@ func (s *S3Cache) Configure(cacheInfo CacheProvider, noCache bool) error { }) } s.session = s3Client - s.noCache = noCache return nil } @@ -64,6 +63,18 @@ func (s *S3Cache) Store(key string, data string) error { } +func (s *S3Cache) Remove(key string) error { + _, err := s.session.DeleteObject(&s3.DeleteObjectInput{ + Bucket: &s.bucketName, + Key: aws.String(key), + }) + + if err != nil { + return err + } + return nil +} + func (s *S3Cache) Load(key string) (string, error) { // Retrieve the object from the bucket and load it into a string @@ -115,5 +126,9 @@ func (s *S3Cache) IsCacheDisabled() bool { } func (s *S3Cache) GetName() string { - return "gcs" + return "s3" +} + +func (s *S3Cache) DisableCache() { + s.noCache = true } diff --git a/pkg/server/config.go b/pkg/server/config.go index bf20c3ab9d..5837d6482d 100644 --- a/pkg/server/config.go +++ b/pkg/server/config.go @@ -24,7 +24,7 @@ func (h *handler) AddConfig(ctx context.Context, i *schemav1.AddConfigRequest) ( return resp, status.Error(codes.InvalidArgument, "mixed cache arguments") } - cacheProvider, err := cache.NewCacheProvider(i.Cache.BucketName, i.Cache.Region, i.Cache.StorageAccount, i.Cache.ContainerName, "", "", false) + cacheProvider, err := cache.NewCacheProvider(i.Cache.BucketName, i.Cache.Region, i.Cache.StorageAccount, i.Cache.ContainerName, "", "") err 
= cache.AddRemoteCache(cacheProvider) if err != nil { return resp, err