From b3e787989d2b25970072b91e6f663ea46d86eb29 Mon Sep 17 00:00:00 2001 From: Anton Galitsyn Date: Mon, 23 Jan 2023 21:33:54 +0700 Subject: [PATCH] rename packages --- cmd/api/main.go | 6 +- cmd/server/main.go | 20 +- pkg/app/api/client/agent_server.go | 9 +- pkg/app/api/models/services.go | 8 +- pkg/app/api/modules/modules.go | 34 +-- pkg/app/api/server/private/agents.go | 6 +- pkg/app/api/server/private/binaries.go | 6 +- pkg/app/api/server/private/modules.go | 172 ++++++++------- pkg/app/api/server/private/policies.go | 6 +- pkg/app/api/server/private/upgrades.go | 13 +- pkg/app/api/storage/mem/connection.go | 10 +- pkg/app/server/certs/config/config.go | 4 +- .../mmodule/hardening/cache/connector.go | 10 +- .../hardening/conn_validator_factory.go | 4 +- .../mmodule/hardening/v1/abher/abh_list.go | 4 +- .../mmodule/hardening/v1/sbher/sbher.go | 4 +- .../hardening/v1/validator/validator.go | 4 +- pkg/app/server/mmodule/mmodule.go | 9 +- .../server/mmodule/upgrader/cache/cache.go | 6 +- pkg/controller/config.go | 9 +- pkg/controller/files.go | 7 +- pkg/controller/loader.go | 24 +-- pkg/{s3/structs.go => filestorage/file.go} | 90 ++------ pkg/{s3 => filestorage/fs}/fs.go | 110 +++++----- pkg/{ => filestorage}/s3/s3.go | 200 +++++++++++------- pkg/{log => logtooling}/log.go | 2 +- pkg/{semver => semvertooling}/semver.go | 2 +- pkg/vxproto/proto.go | 4 +- 28 files changed, 397 insertions(+), 386 deletions(-) rename pkg/{s3/structs.go => filestorage/file.go} (55%) rename pkg/{s3 => filestorage/fs}/fs.go (72%) rename pkg/{ => filestorage}/s3/s3.go (65%) rename pkg/{log => logtooling}/log.go (96%) rename pkg/{semver => semvertooling}/semver.go (98%) diff --git a/cmd/api/main.go b/cmd/api/main.go index 3ed0e965..159dd653 100644 --- a/cmd/api/main.go +++ b/cmd/api/main.go @@ -25,7 +25,7 @@ import ( useraction "soldr/pkg/app/api/user_action" "soldr/pkg/app/api/worker" "soldr/pkg/app/api/worker/events" - "soldr/pkg/log" + "soldr/pkg/logtooling" "soldr/pkg/mysql" 
"soldr/pkg/observability" "soldr/pkg/secret" @@ -161,8 +161,8 @@ func main() { MaxAge: 14, Compress: true, } - logrus.SetLevel(log.ParseLevel(cfg.Log.Level)) - logrus.SetFormatter(log.ParseFormat(cfg.Log.Format)) + logrus.SetLevel(logtooling.ParseLevel(cfg.Log.Level)) + logrus.SetFormatter(logtooling.ParseFormat(cfg.Log.Format)) logrus.SetOutput(io.MultiWriter(os.Stdout, logFile)) dsn := fmt.Sprintf("%s:%s@%s/%s?parseTime=true", diff --git a/cmd/server/main.go b/cmd/server/main.go index 757d4b9a..fc260fc9 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -34,8 +34,10 @@ import ( "soldr/pkg/app/server/mmodule" "soldr/pkg/controller" "soldr/pkg/db" + "soldr/pkg/filestorage" + "soldr/pkg/filestorage/fs" + "soldr/pkg/filestorage/s3" "soldr/pkg/observability" - "soldr/pkg/s3" "soldr/pkg/system" "soldr/pkg/utils" "soldr/pkg/vxproto" @@ -104,7 +106,7 @@ func (s *Server) Init(env svc.Environment) (err error) { case loaderTypeFS: cl, err = controller.NewConfigFromFS(s.config.Base) case loaderTypeS3: - var s3ConnParams *s3.RemoteStorageConfig + var s3ConnParams *s3.Config s3ConnParams, err = s3ConnParamsFromConfig(&s.config.S3) if err != nil { err = fmt.Errorf("failed to compose s3 connection params from config: %w", err) @@ -133,7 +135,7 @@ func (s *Server) Init(env svc.Environment) (err error) { case loaderTypeFS: fl, err = controller.NewFilesFromFS(s.config.Base) case loaderTypeS3: - var s3ConnParams *s3.RemoteStorageConfig + var s3ConnParams *s3.Config s3ConnParams, err = s3ConnParamsFromConfig(&s.config.S3) if err != nil { err = fmt.Errorf("failed to compose s3 connection params from config: %w", err) @@ -150,7 +152,7 @@ func (s *Server) Init(env svc.Environment) (err error) { logger.Info("modules files loader was created") utils.RemoveUnusedTempDir() - store, err := s3.NewRemoteStorage(&s3.RemoteStorageConfig{ + store, err := s3.New(&s3.Config{ Endpoint: s.config.S3.Endpoint, AccessKey: s.config.S3.AccessKey, SecretKey: s.config.S3.SecretKey, @@ -214,7 +216,7 
@@ func dsnFromConfig(c *config.DB) (*db.DSN, error) { }, nil } -func s3ConnParamsFromConfig(c *config.S3) (*s3.RemoteStorageConfig, error) { +func s3ConnParamsFromConfig(c *config.S3) (*s3.Config, error) { if c == nil { return nil, fmt.Errorf("passed config is nil") } @@ -230,7 +232,7 @@ func s3ConnParamsFromConfig(c *config.S3) (*s3.RemoteStorageConfig, error) { if len(c.BucketName) == 0 { return nil, fmt.Errorf("bucket name is empty") } - return &s3.RemoteStorageConfig{ + return &s3.Config{ Endpoint: c.Endpoint, AccessKey: c.AccessKey, SecretKey: c.SecretKey, @@ -238,11 +240,11 @@ func s3ConnParamsFromConfig(c *config.S3) (*s3.RemoteStorageConfig, error) { }, nil } -func initCertProvider(c *config.CertsConfig, s3FileReader s3.FileReader) (certs.Provider, error) { +func initCertProvider(c *config.CertsConfig, s3FileReader filestorage.Reader) (certs.Provider, error) { if c == nil { return nil, fmt.Errorf("passed config object is nil") } - createFileProvider := func(store s3.FileReader, base string) (certs.Provider, error) { + createFileProvider := func(store filestorage.Reader, base string) (certs.Provider, error) { conf := &certsConfig.Config{ StaticProvider: &certsConfig.StaticProvider{ Reader: store, @@ -257,7 +259,7 @@ func initCertProvider(c *config.CertsConfig, s3FileReader s3.FileReader) (certs. 
} switch c.Type { case loaderTypeFS: - store, err := s3.NewLocalStorage() + store, err := fs.New() if err != nil { return nil, fmt.Errorf("failed to initialize a file store: %w", err) } diff --git a/pkg/app/api/client/agent_server.go b/pkg/app/api/client/agent_server.go index d2396fda..8227322a 100644 --- a/pkg/app/api/client/agent_server.go +++ b/pkg/app/api/client/agent_server.go @@ -10,8 +10,9 @@ import ( "soldr/pkg/app/api/models" "soldr/pkg/app/api/storage/mem" + "soldr/pkg/filestorage" + "soldr/pkg/filestorage/s3" "soldr/pkg/mysql" - "soldr/pkg/s3" "soldr/pkg/secret" ) @@ -70,7 +71,7 @@ func (c *AgentServerClient) GetDB(ctx context.Context, hash string) (*gorm.DB, e return dbWithORM, nil } -func (c *AgentServerClient) GetS3(hash string) (s3.FileStorage, error) { +func (c *AgentServerClient) GetS3(hash string) (filestorage.Storage, error) { s3Conn, err := c.s3Conns.Get(hash) if err == nil { return s3Conn, nil @@ -81,9 +82,9 @@ func (c *AgentServerClient) GetS3(hash string) (s3.FileStorage, error) { return nil, fmt.Errorf("could not get service by hash '%s': %w", hash, err) } - s3Conn, err = s3.NewRemoteStorage(service.Info.S3.ToS3ConnParams()) + s3Conn, err = s3.New(service.Info.S3.ToS3ConnParams()) if err != nil { - return nil, fmt.Errorf("could not create RemoteStorage client: %w", err) + return nil, fmt.Errorf("could not create Client client: %w", err) } c.s3Conns.Set(hash, s3Conn) diff --git a/pkg/app/api/models/services.go b/pkg/app/api/models/services.go index c86ee305..0129b3ed 100644 --- a/pkg/app/api/models/services.go +++ b/pkg/app/api/models/services.go @@ -6,7 +6,7 @@ import ( "github.com/jinzhu/gorm" - "soldr/pkg/s3" + "soldr/pkg/filestorage/s3" ) // ServiceInfoDB is model to contain service external config to connetion to DB @@ -23,7 +23,7 @@ func (sidb ServiceInfoDB) Valid() error { return validate.Struct(sidb) } -// ServiceInfoS3 is model to contain service external config to connetion to RemoteStorage +// ServiceInfoS3 is model to contain 
service external config to connetion to Client type ServiceInfoS3 struct { Endpoint string `form:"endpoint" json:"endpoint" validate:"max=100,required"` AccessKey string `form:"access_key" json:"access_key" validate:"max=50,required"` @@ -32,8 +32,8 @@ type ServiceInfoS3 struct { } // ToS3ConnParams is a helper function to convert the structure to the vxcommon version one -func (sis3 *ServiceInfoS3) ToS3ConnParams() *s3.RemoteStorageConfig { - return &s3.RemoteStorageConfig{ +func (sis3 *ServiceInfoS3) ToS3ConnParams() *s3.Config { + return &s3.Config{ Endpoint: sis3.Endpoint, AccessKey: sis3.AccessKey, SecretKey: sis3.SecretKey, diff --git a/pkg/app/api/modules/modules.go b/pkg/app/api/modules/modules.go index 3b3b501b..23fa0718 100644 --- a/pkg/app/api/modules/modules.go +++ b/pkg/app/api/modules/modules.go @@ -12,7 +12,7 @@ import ( "soldr/pkg/app/api/models" "soldr/pkg/app/api/utils" "soldr/pkg/crypto" - "soldr/pkg/s3" + "soldr/pkg/filestorage/s3" ) type agentModuleDetails struct { @@ -50,9 +50,9 @@ func joinPath(args ...string) string { } func CopyModuleAFilesToInstanceS3(mi *models.ModuleInfo, sv *models.Service) error { - gS3, err := s3.NewRemoteStorage(nil) + gS3, err := s3.New(nil) if err != nil { - return errors.New("failed to initialize global RemoteStorage driver: " + err.Error()) + return errors.New("failed to initialize global Client driver: " + err.Error()) } mfiles, err := gS3.ReadDirRec(joinPath(mi.Name, mi.Version.String())) @@ -67,24 +67,24 @@ func CopyModuleAFilesToInstanceS3(mi *models.ModuleInfo, sv *models.Service) err return errors.New("failed to read utils files: " + err.Error()) } - iS3, err := s3.NewRemoteStorage(sv.Info.S3.ToS3ConnParams()) + iS3, err := s3.New(sv.Info.S3.ToS3ConnParams()) if err != nil { - return errors.New("failed to initialize instance RemoteStorage driver: " + err.Error()) + return errors.New("failed to initialize instance Client driver: " + err.Error()) } if iS3.RemoveDir(joinPath(mi.Name, mi.Version.String())); err 
!= nil { - return errors.New("failed to remove module directory from instance RemoteStorage: " + err.Error()) + return errors.New("failed to remove module directory from instance Client: " + err.Error()) } for fpath, fdata := range mfiles { if err := iS3.WriteFile(joinPath(mi.Name, mi.Version.String(), fpath), fdata); err != nil { - return errors.New("failed to write system module file to RemoteStorage: " + err.Error()) + return errors.New("failed to write system module file to Client: " + err.Error()) } } for fpath, fdata := range ufiles { if err := iS3.WriteFile(joinPath("utils", fpath), fdata); err != nil { - return errors.New("failed to write utils file to RemoteStorage: " + err.Error()) + return errors.New("failed to write utils file to Client: " + err.Error()) } } @@ -156,8 +156,8 @@ func clearMapKeysList(curMap, defMap map[string]interface{}) map[string]interfac // args: // -// cc is Current module RemoteStorageConfig from old module version which we tried to keep -// dc is Default module RemoteStorageConfig from actual module version which we are using as a reference +// cc is Current module Config from old module version which we tried to keep +// dc is Default module Config from actual module version which we are using as a reference // sh is JSON Schema structure from actual module version which wa are using to check result document func mergeModuleACurrentConfig(cc, dc models.ModuleConfig, sh models.Schema) models.ModuleConfig { // add new config values from default @@ -204,8 +204,8 @@ func mergeModuleASecureCurrentConfig(cc, dc models.ModuleSecureConfig, sh models // args: // -// cac is Current Action RemoteStorageConfig from old module version which we tried to keep -// dac is Default Action RemoteStorageConfig from actual module version which we are using as a reference +// cac is Current Action Config from old module version which we tried to keep +// dac is Default Action Config from actual module version which we are using as a reference // sh is 
JSON Schema structure from actual module version which wa are using to check result document func mergeModuleACurrentActionConfig(cac, dac models.ActionConfig, sh models.Schema) models.ActionConfig { for acn, daci := range dac { @@ -239,8 +239,8 @@ func mergeModuleACurrentActionConfig(cac, dac models.ActionConfig, sh models.Sch // args: // -// ceci is Current Event RemoteStorageConfig Item from old module version which we tried to keep -// deci is Default Event RemoteStorageConfig Item from actual module version which we are using as a reference +// ceci is Current Event Config Item from old module version which we tried to keep +// deci is Default Event Config Item from actual module version which we are using as a reference // sh is JSON Schema structure from actual module version which wa are using to check result document func mergeModuleAEventConfigItem(ceci, deci models.EventConfigItem, sh models.Schema) models.EventConfigItem { reci := models.EventConfigItem{} @@ -256,8 +256,8 @@ func mergeModuleAEventConfigItem(ceci, deci models.EventConfigItem, sh models.Sc // args: // -// cec is Current Event RemoteStorageConfig from old module version which we tried to keep -// dec is Default Event RemoteStorageConfig from actual module version which we are using as a reference +// cec is Current Event Config from old module version which we tried to keep +// dec is Default Event Config from actual module version which we are using as a reference // sh is JSON Schema structure from actual module version which wa are using to check result document func mergeModuleACurrentEventConfig(cec, dec models.EventConfig, sh models.Schema) models.EventConfig { ecsh := copySchema(&sh.Type, models.GetECSDefinitions(sh.Definitions)) @@ -291,7 +291,7 @@ func mergeModuleACurrentEventConfig(cec, dec models.EventConfig, sh models.Schem // args: // // dd is Dynamic Dependencies from old module version which we tried to keep -// cec is Current Event RemoteStorageConfig which was got after 
merging to default (result Current Event RemoteStorageConfig) +// cec is Current Event Config which was got after merging to default (result Current Event Config) func clearModuleADynamicDependencies(dd models.Dependencies, cec models.EventConfig) models.Dependencies { rdd := models.Dependencies{} checkDepInActions := func(ec models.EventConfigItem, moduleName string) bool { diff --git a/pkg/app/api/server/private/agents.go b/pkg/app/api/server/private/agents.go index f2b888c1..4be88f97 100644 --- a/pkg/app/api/server/private/agents.go +++ b/pkg/app/api/server/private/agents.go @@ -16,7 +16,7 @@ import ( "soldr/pkg/app/api/server/response" "soldr/pkg/app/api/storage" useraction "soldr/pkg/app/api/user_action" - "soldr/pkg/semver" + "soldr/pkg/semvertooling" ) type agentDetails struct { @@ -158,8 +158,8 @@ func getAgentConsistency(modules []models.ModuleAShort, agent *models.Agent) (bo } var sdeps bool - switch semver.CompareVersions(agent.Version, dep.MinAgentVersion) { - case semver.TargetVersionEmpty, semver.VersionsEqual, semver.SourceVersionGreat: + switch semvertooling.CompareVersions(agent.Version, dep.MinAgentVersion) { + case semvertooling.TargetVersionEmpty, semvertooling.VersionsEqual, semvertooling.SourceVersionGreat: sdeps = true default: rdeps = false diff --git a/pkg/app/api/server/private/binaries.go b/pkg/app/api/server/private/binaries.go index 14eeaee6..a9c34638 100644 --- a/pkg/app/api/server/private/binaries.go +++ b/pkg/app/api/server/private/binaries.go @@ -15,7 +15,7 @@ import ( "soldr/pkg/app/api/server/response" storage2 "soldr/pkg/app/api/storage" useraction "soldr/pkg/app/api/user_action" - "soldr/pkg/s3" + "soldr/pkg/filestorage/s3" ) type binaries struct { @@ -215,9 +215,9 @@ func (s *BinariesService) GetAgentBinaryFile(c *gin.Context) { return } - s3Client, err := s3.NewRemoteStorage(nil) + s3Client, err := s3.New(nil) if err != nil { - logger.FromContext(c).WithError(err).Errorf("error openning connection to RemoteStorage") + 
logger.FromContext(c).WithError(err).Errorf("error openning connection to Client") response.Error(c, response.ErrInternal, err) return } diff --git a/pkg/app/api/server/private/modules.go b/pkg/app/api/server/private/modules.go index c7596360..5ffcb214 100644 --- a/pkg/app/api/server/private/modules.go +++ b/pkg/app/api/server/private/modules.go @@ -27,12 +27,14 @@ import ( "soldr/pkg/app/api/models" "soldr/pkg/app/api/modules" "soldr/pkg/app/api/server/response" - storage2 "soldr/pkg/app/api/storage" + "soldr/pkg/app/api/storage" useraction "soldr/pkg/app/api/user_action" "soldr/pkg/app/api/utils" "soldr/pkg/crypto" - "soldr/pkg/s3" - semver2 "soldr/pkg/semver" + "soldr/pkg/filestorage" + "soldr/pkg/filestorage/fs" + "soldr/pkg/filestorage/s3" + "soldr/pkg/semvertooling" ) type agentModuleDetails struct { @@ -146,13 +148,13 @@ const sqlPolicyModuleDetails = ` var modulesSQLMappers = map[string]interface{}{ "status": "`{{table}}`.status", "system": "`{{table}}`.system", - "actions": storage2.ActionsMapper, - "events": storage2.EventsMapper, - "fields": storage2.FieldsMapper, - "tags": storage2.TagsMapper, - "os": storage2.ModulesOSMapper, - "os_arch": storage2.ModulesOSArchMapper, - "os_type": storage2.ModulesOSTypeMapper, + "actions": storage.ActionsMapper, + "events": storage.EventsMapper, + "fields": storage.FieldsMapper, + "tags": storage.TagsMapper, + "os": storage.ModulesOSMapper, + "os_arch": storage.ModulesOSArchMapper, + "os_type": storage.ModulesOSTypeMapper, "version": "`{{table}}`.version", "ver_major": "`{{table}}`.ver_major", "ver_minor": "`{{table}}`.ver_minor", @@ -212,7 +214,7 @@ func removeLeadSlash(files map[string][]byte) map[string][]byte { return rfiles } -func readDir(s s3.FileStorage, path string) ([]string, error) { +func readDir(s filestorage.Storage, path string) ([]string, error) { var files []string list, err := s.ListDir(path) if err != nil { @@ -584,7 +586,7 @@ func BuildModuleSConfig(module *models.ModuleS) (map[string][]byte, error) 
{ } func LoadModuleSTemplate(mi *models.ModuleInfo, templatesDir string) (Template, *models.ModuleS, error) { - fs, err := s3.NewLocalStorage() + fs, err := fs.New() if err != nil { return nil, nil, errors.New("failed initialize LocalStorage driver: " + err.Error()) } @@ -666,13 +668,13 @@ func LoadModuleSFromGlobalS3(mi *models.ModuleInfo) (Template, error) { func StoreModuleSToGlobalS3(mi *models.ModuleInfo, mf Template) error { s3, err := s3.New(nil) if err != nil { - return errors.New("failed initialize RemoteStorage driver: " + err.Error()) + return errors.New("failed initialize Client driver: " + err.Error()) } for _, dir := range []string{"bmodule", "cmodule", "smodule", "config"} { for fpath, fdata := range mf[dir] { if err := s3.WriteFile(joinPath(mi.Name, mi.Version.String(), dir, fpath), fdata); err != nil { - return errors.New("failed to write file to RemoteStorage: " + err.Error()) + return errors.New("failed to write file to Client: " + err.Error()) } } } @@ -683,14 +685,14 @@ func StoreModuleSToGlobalS3(mi *models.ModuleInfo, mf Template) error { func StoreCleanModuleSToGlobalS3(mi *models.ModuleInfo, mf Template) error { s3, err := s3.New(nil) if err != nil { - return errors.New("failed initialize RemoteStorage driver: " + err.Error()) + return errors.New("failed initialize Client driver: " + err.Error()) } for _, dir := range []string{"bmodule", "cmodule", "smodule", "config"} { files, _ := s3.ListDirRec(joinPath(mi.Name, mi.Version.String(), dir)) for fpath, fdata := range mf[dir] { if err := s3.WriteFile(joinPath(mi.Name, mi.Version.String(), dir, fpath), fdata); err != nil { - return errors.New("failed to write file to RemoteStorage: " + err.Error()) + return errors.New("failed to write file to Client: " + err.Error()) } } if files == nil { @@ -705,7 +707,7 @@ func StoreCleanModuleSToGlobalS3(mi *models.ModuleInfo, mf Template) error { continue } if err := s3.RemoveFile(joinPath(mi.Name, mi.Version.String(), dir, fpath)); err != nil { - return 
errors.New("failed to remove unused file from RemoteStorage: " + err.Error()) + return errors.New("failed to remove unused file from Client: " + err.Error()) } } } @@ -716,7 +718,7 @@ func StoreCleanModuleSToGlobalS3(mi *models.ModuleInfo, mf Template) error { func CopyModuleAFilesToInstanceS3(mi *models.ModuleInfo, sv *models.Service) error { gS3, err := s3.New(nil) if err != nil { - return errors.New("failed to initialize global RemoteStorage driver: " + err.Error()) + return errors.New("failed to initialize global Client driver: " + err.Error()) } mfiles, err := gS3.ReadDirRec(joinPath(mi.Name, mi.Version.String())) @@ -733,22 +735,22 @@ func CopyModuleAFilesToInstanceS3(mi *models.ModuleInfo, sv *models.Service) err iS3, err := s3.New(sv.Info.S3.ToS3ConnParams()) if err != nil { - return errors.New("failed to initialize instance RemoteStorage driver: " + err.Error()) + return errors.New("failed to initialize instance Client driver: " + err.Error()) } if iS3.RemoveDir(joinPath(mi.Name, mi.Version.String())); err != nil { - return errors.New("failed to remove module directory from instance RemoteStorage: " + err.Error()) + return errors.New("failed to remove module directory from instance Client: " + err.Error()) } for fpath, fdata := range mfiles { if err := iS3.WriteFile(joinPath(mi.Name, mi.Version.String(), fpath), fdata); err != nil { - return errors.New("failed to write system module file to RemoteStorage: " + err.Error()) + return errors.New("failed to write system module file to Client: " + err.Error()) } } for fpath, fdata := range ufiles { if err := iS3.WriteFile(joinPath("utils", fpath), fdata); err != nil { - return errors.New("failed to write utils file to RemoteStorage: " + err.Error()) + return errors.New("failed to write utils file to Client: " + err.Error()) } } @@ -873,7 +875,7 @@ func removeUnusedModuleVersion(c *gin.Context, iDB *gorm.DB, name, version strin if count == 0 { s3, err := s3.New(sv.Info.S3.ToS3ConnParams()) if err != nil { - 
logrus.WithError(err).Errorf("error openning connection to RemoteStorage") + logrus.WithError(err).Errorf("error openning connection to Client") return err } @@ -930,7 +932,7 @@ func updateDependenciesWhenModuleRemove(c *gin.Context, iDB *gorm.DB, name strin } func updatePolicyModulesByModuleS(c *gin.Context, moduleS *models.ModuleS, sv *models.Service) error { - iDB := storage2.GetDB(sv.Info.DB.User, sv.Info.DB.Pass, sv.Info.DB.Host, + iDB := storage.GetDB(sv.Info.DB.User, sv.Info.DB.Pass, sv.Info.DB.Host, strconv.Itoa(int(sv.Info.DB.Port)), sv.Info.DB.Name) if iDB == nil { logrus.Errorf("error openning connection to instance DB") @@ -959,7 +961,7 @@ func updatePolicyModulesByModuleS(c *gin.Context, moduleS *models.ModuleS, sv *m } if err := CopyModuleAFilesToInstanceS3(&moduleS.Info, sv); err != nil { - logger.FromContext(c).WithError(err).Errorf("error copying module files to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error copying module files to Client") return err } @@ -1041,8 +1043,8 @@ func clearMapKeysList(curMap, defMap map[string]interface{}) map[string]interfac // args: // -// cc is Current module RemoteStorageConfig from old module version which we tried to keep -// dc is Default module RemoteStorageConfig from actual module version which we are using as a reference +// cc is Current module Config from old module version which we tried to keep +// dc is Default module Config from actual module version which we are using as a reference // sh is JSON Schema structure from actual module version which wa are using to check result document func mergeModuleACurrentConfig(cc, dc models.ModuleConfig, sh models.Schema) models.ModuleConfig { // add new config values from default @@ -1089,8 +1091,8 @@ func mergeModuleASecureCurrentConfig(cc, dc models.ModuleSecureConfig, sh models // args: // -// cac is Current Action RemoteStorageConfig from old module version which we tried to keep -// dac is Default Action RemoteStorageConfig from actual 
module version which we are using as a reference +// cac is Current Action Config from old module version which we tried to keep +// dac is Default Action Config from actual module version which we are using as a reference // sh is JSON Schema structure from actual module version which wa are using to check result document func mergeModuleACurrentActionConfig(cac, dac models.ActionConfig, sh models.Schema) models.ActionConfig { for acn, daci := range dac { @@ -1124,8 +1126,8 @@ func mergeModuleACurrentActionConfig(cac, dac models.ActionConfig, sh models.Sch // args: // -// ceci is Current Event RemoteStorageConfig Item from old module version which we tried to keep -// deci is Default Event RemoteStorageConfig Item from actual module version which we are using as a reference +// ceci is Current Event Config Item from old module version which we tried to keep +// deci is Default Event Config Item from actual module version which we are using as a reference // sh is JSON Schema structure from actual module version which wa are using to check result document func mergeModuleAEventConfigItem(ceci, deci models.EventConfigItem, sh models.Schema) models.EventConfigItem { reci := models.EventConfigItem{} @@ -1141,8 +1143,8 @@ func mergeModuleAEventConfigItem(ceci, deci models.EventConfigItem, sh models.Sc // args: // -// cec is Current Event RemoteStorageConfig from old module version which we tried to keep -// dec is Default Event RemoteStorageConfig from actual module version which we are using as a reference +// cec is Current Event Config from old module version which we tried to keep +// dec is Default Event Config from actual module version which we are using as a reference // sh is JSON Schema structure from actual module version which wa are using to check result document func mergeModuleACurrentEventConfig(cec, dec models.EventConfig, sh models.Schema) models.EventConfig { ecsh := copySchema(&sh.Type, models.GetECSDefinitions(sh.Definitions)) @@ -1176,7 +1178,7 @@ 
func mergeModuleACurrentEventConfig(cec, dec models.EventConfig, sh models.Schem // args: // // dd is Dynamic Dependencies from old module version which we tried to keep -// cec is Current Event RemoteStorageConfig which was got after merging to default (result Current Event RemoteStorageConfig) +// cec is Current Event Config which was got after merging to default (result Current Event Config) func clearModuleADynamicDependencies(dd models.Dependencies, cec models.EventConfig) models.Dependencies { rdd := models.Dependencies{} checkDepInActions := func(ec models.EventConfigItem, moduleName string) bool { @@ -1330,7 +1332,7 @@ func (s *ModuleService) GetAgentModules(c *gin.Context) { var ( hash = c.Param("hash") pids []uint64 - query storage2.TableQuery + query storage.TableQuery resp agentModules sv *models.Service ) @@ -1552,7 +1554,6 @@ func (s *ModuleService) GetAgentBModule(c *gin.Context) { module models.ModuleA moduleName = c.Param("module_name") pids []uint64 - s3 s3.FileStorage sv *models.Service ) @@ -1613,14 +1614,14 @@ func (s *ModuleService) GetAgentBModule(c *gin.Context) { return } - s3, err = s3.NewS3(sv.Info.S3.ToS3ConnParams()) + s3Client, err := s3.New(sv.Info.S3.ToS3ConnParams()) if err != nil { - logger.FromContext(c).WithError(err).Errorf("error openning connection to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error openning connection to Client") return } path := path.Join(moduleName, module.Info.Version.String(), "bmodule", filepath) - if data, err = s3.ReadFile(path); err != nil { + if data, err = s3Client.ReadFile(path); err != nil { logger.FromContext(c).WithError(err).Errorf("error reading module file '%s'", path) return } @@ -1643,7 +1644,7 @@ func (s *ModuleService) GetGroupModules(c *gin.Context) { gps models.GroupPolicies hash = c.Param("hash") pids []uint64 - query storage2.TableQuery + query storage.TableQuery resp groupModules sv *models.Service ) @@ -1866,7 +1867,6 @@ func (s *ModuleService) GetGroupBModule(c 
*gin.Context) { module models.ModuleA moduleName = c.Param("module_name") pids []uint64 - s3 s3.FileStorage sv *models.Service ) @@ -1926,14 +1926,14 @@ func (s *ModuleService) GetGroupBModule(c *gin.Context) { return } - s3, err = s3.NewS3(sv.Info.S3.ToS3ConnParams()) + s3Client, err := s3.New(sv.Info.S3.ToS3ConnParams()) if err != nil { - logger.FromContext(c).WithError(err).Errorf("error openning connection to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error openning connection to Client") return } path := path.Join(moduleName, module.Info.Version.String(), "bmodule", filepath) - if data, err = s3.ReadFile(path); err != nil { + if data, err = s3Client.ReadFile(path); err != nil { logger.FromContext(c).WithError(err).Errorf("error reading module file '%s'", path) return } @@ -1957,7 +1957,7 @@ func (s *ModuleService) GetPolicyModules(c *gin.Context) { modulesA []models.ModuleA modulesS []models.ModuleS policy models.Policy - query storage2.TableQuery + query storage.TableQuery resp policyModules sv *models.Service ) @@ -2004,7 +2004,7 @@ func (s *ModuleService) GetPolicyModules(c *gin.Context) { queryA := query queryA.Page = 0 queryA.Size = 0 - queryA.Filters = []storage2.TableFilter{} + queryA.Filters = []storage.TableFilter{} queryA.Init("modules", modulesSQLMappers) queryA.SetFilters([]func(db *gorm.DB) *gorm.DB{ func(db *gorm.DB) *gorm.DB { @@ -2193,7 +2193,6 @@ func (s *ModuleService) GetPolicyBModule(c *gin.Context) { module models.ModuleA moduleName = c.Param("module_name") policy models.Policy - s3 s3.FileStorage sv *models.Service ) @@ -2233,14 +2232,14 @@ func (s *ModuleService) GetPolicyBModule(c *gin.Context) { return } - s3, err = s3.NewS3(sv.Info.S3.ToS3ConnParams()) + s3Client, err := s3.New(sv.Info.S3.ToS3ConnParams()) if err != nil { - logger.FromContext(c).WithError(err).Errorf("error openning connection to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error openning connection to Client") return } path := 
path.Join(moduleName, module.Info.Version.String(), "bmodule", filepath) - if data, err = s3.ReadFile(path); err != nil { + if data, err = s3Client.ReadFile(path); err != nil { logger.FromContext(c).WithError(err).Errorf("error reading module file '%s'", path) return } @@ -2376,7 +2375,7 @@ func (s *ModuleService) PatchPolicyModule(c *gin.Context) { if moduleA.ID == 0 { if err = CopyModuleAFilesToInstanceS3(&moduleA.Info, sv); err != nil { - logger.FromContext(c).WithError(err).Errorf("error copying module files to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error copying module files to Client") response.Error(c, response.ErrInternal, err) return } @@ -2479,7 +2478,7 @@ func (s *ModuleService) PatchPolicyModule(c *gin.Context) { } if err = CopyModuleAFilesToInstanceS3(&moduleA.Info, sv); err != nil { - logger.FromContext(c).WithError(err).Errorf("error copying module files to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error copying module files to Client") response.Error(c, response.ErrInternal, err) return } @@ -2908,7 +2907,7 @@ func (s *ModuleService) GetPolicyModuleSecureConfigValue(c *gin.Context) { // @Router /modules/ [get] func (s *ModuleService) GetModules(c *gin.Context) { var ( - query storage2.TableQuery + query storage.TableQuery sv *models.Service resp systemModules useVersion bool @@ -3069,7 +3068,7 @@ func (s *ModuleService) CreateModule(c *gin.Context) { } if err = StoreModuleSToGlobalS3(&info, template); err != nil { - logger.FromContext(c).WithError(err).Errorf("error storing module to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error storing module to Client") response.Error(c, response.ErrCreateModuleStoreS3Fail, err) return } @@ -3088,7 +3087,7 @@ func (s *ModuleService) CreateModule(c *gin.Context) { } // DeleteModule is a function to cascade delete system module -// @Summary Delete system module from all DBs and RemoteStorage storage +// @Summary Delete system module from all DBs and 
Client storage // @Tags Modules // @Produce json // @Param module_name path string true "module name without spaces" @@ -3102,7 +3101,6 @@ func (s *ModuleService) DeleteModule(c *gin.Context) { err error modules []models.ModuleS moduleName = c.Param("module_name") - s3 s3.FileStorage sv *models.Service services []models.Service ) @@ -3135,10 +3133,9 @@ func (s *ModuleService) DeleteModule(c *gin.Context) { var ( err error modules []models.ModuleA - s3 s3.IStorage ) - iDB := storage2.GetDB(s.Info.DB.User, s.Info.DB.Pass, s.Info.DB.Host, + iDB := storage.GetDB(s.Info.DB.User, s.Info.DB.Pass, s.Info.DB.Host, strconv.Itoa(int(s.Info.DB.Port)), s.Info.DB.Name) if iDB == nil { logger.FromContext(c).Errorf("error openning connection to instance DB") @@ -3156,13 +3153,13 @@ func (s *ModuleService) DeleteModule(c *gin.Context) { return err } - s3, err = s3.NewS3(sv.Info.S3.ToS3ConnParams()) + s3Client, err := s3.New(sv.Info.S3.ToS3ConnParams()) if err != nil { - logger.FromContext(c).WithError(err).Errorf("error openning connection to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error openning connection to Client") return err } - if err = s3.RemoveDir(moduleName + "/"); err != nil && err.Error() != "not found" { + if err = s3Client.RemoveDir(moduleName + "/"); err != nil && err.Error() != "not found" { logger.FromContext(c).WithError(err).Errorf("error removing modules files") return err } @@ -3189,13 +3186,14 @@ func (s *ModuleService) DeleteModule(c *gin.Context) { return } - if s3, err = s3.NewS3(nil); err != nil { - logger.FromContext(c).WithError(err).Errorf("error openning connection to RemoteStorage") + s3Client, err := s3.New(nil) + if err != nil { + logger.FromContext(c).WithError(err).Errorf("error openning connection to Client") response.Error(c, response.ErrInternal, err) return } - if err = s3.RemoveDir(moduleName + "/"); err != nil && err.Error() != "not found" { + if err = s3Client.RemoveDir(moduleName + "/"); err != nil && err.Error() != 
"not found" { logger.FromContext(c).WithError(err).Errorf("error removing system modules files") response.Error(c, response.ErrDeleteModuleDeleteFilesFail, err) return @@ -3218,7 +3216,7 @@ func (s *ModuleService) DeleteModule(c *gin.Context) { func (s *ModuleService) GetModuleVersions(c *gin.Context) { var ( moduleName = c.Param("module_name") - query storage2.TableQuery + query storage.TableQuery sv *models.Service resp systemShortModules ) @@ -3327,7 +3325,7 @@ func (s *ModuleService) GetModuleVersion(c *gin.Context) { } // PatchModuleVersion is a function to update system module by name and version -// @Summary Update the version of system module to global DB and global RemoteStorage storage +// @Summary Update the version of system module to global DB and global Client storage // @Tags Modules // @Accept json // @Produce json @@ -3446,7 +3444,7 @@ func (s *ModuleService) PatchModuleVersion(c *gin.Context) { template["config"] = cfiles if err = StoreModuleSToGlobalS3(&form.Module.Info, template); err != nil { - logger.FromContext(c).WithError(err).Errorf("error storing system module files to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error storing system module files to Client") response.Error(c, response.ErrPatchModuleVersionUpdateS3Fail, err) return } @@ -3573,8 +3571,8 @@ func (s *ModuleService) CreateModuleVersion(c *gin.Context) { return } - switch semver2.CompareVersions(module.Info.Version.String(), version) { - case semver2.TargetVersionGreat: + switch semvertooling.CompareVersions(module.Info.Version.String(), version) { + case semvertooling.TargetVersionGreat: default: logger.FromContext(c).Errorf("error validating new version '%s' -> '%s'", module.Info.Version.String(), version) @@ -3609,7 +3607,7 @@ func (s *ModuleService) CreateModuleVersion(c *gin.Context) { template["config"] = cfiles if err = StoreModuleSToGlobalS3(&module.Info, template); err != nil { - logger.FromContext(c).WithError(err).Errorf("error storing module to 
RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error storing module to Client") response.Error(c, response.ErrCreateModuleVersionStoreS3Fail, err) return } @@ -3630,7 +3628,7 @@ func (s *ModuleService) CreateModuleVersion(c *gin.Context) { } // DeleteModuleVersion is a function to delete the version system module -// @Summary Delete the version system module from global DB and global RemoteStorage storage +// @Summary Delete the version system module from global DB and global Client storage // @Tags Modules // @Produce json // @Param module_name path string true "module name without spaces" @@ -3695,7 +3693,7 @@ func (s *ModuleService) DeleteModuleVersion(c *gin.Context) { s3, err := s3.New(nil) if err != nil { - logger.FromContext(c).WithError(err).Errorf("error openning connection to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error openning connection to Client") response.Error(c, response.ErrInternal, err) return } @@ -3902,16 +3900,16 @@ func (s *ModuleService) GetModuleVersionFiles(c *gin.Context) { return } - s3, err := s3.New(nil) + s3Client, err := s3.New(nil) if err != nil { - logger.FromContext(c).WithError(err).Errorf("error openning connection to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error openning connection to Client") response.Error(c, response.ErrInternal, err) return } path := moduleName + "/" + module.Info.Version.String() - if files, err = readDir(s3, path); err != nil { - logger.FromContext(c).WithError(err).Errorf("error listening module files from RemoteStorage") + if files, err = readDir(s3Client, path); err != nil { + logger.FromContext(c).WithError(err).Errorf("error listening module files from Client") response.Error(c, response.ErrGetModuleVersionFilesListenFail, err) return } @@ -3968,7 +3966,7 @@ func (s *ModuleService) GetModuleVersionFile(c *gin.Context) { s3, err := s3.New(nil) if err != nil { - logger.FromContext(c).WithError(err).Errorf("error openning connection to 
RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error openning connection to Client") response.Error(c, response.ErrInternal, err) return } @@ -4051,7 +4049,7 @@ func (s *ModuleService) PatchModuleVersionFile(c *gin.Context) { s3, err := s3.New(nil) if err != nil { - logger.FromContext(c).WithError(err).Errorf("error openning connection to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error openning connection to Client") response.Error(c, response.ErrInternal, err) return } @@ -4078,14 +4076,14 @@ func (s *ModuleService) PatchModuleVersionFile(c *gin.Context) { } if err = s3.WriteFile(form.Path, data); err != nil { - logger.FromContext(c).WithError(err).Errorf("error writing file data to RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error writing file data to Client") response.Error(c, response.ErrPatchModuleVersionFileWriteModuleFileFail, err) return } case "remove": if err = s3.Remove(form.Path); err != nil { - logger.FromContext(c).WithError(err).Errorf("error removing file from RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error removing file from Client") response.Error(c, response.ErrPatchModuleVersionFileWriteModuleObjectFail, err) return } @@ -4098,7 +4096,7 @@ func (s *ModuleService) PatchModuleVersionFile(c *gin.Context) { } if info, err = s3.GetInfo(form.Path); err != nil { - logger.FromContext(c).WithError(err).Errorf("error getting file info from RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error getting file info from Client") response.Error(c, response.ErrPatchModuleVersionFileObjectNotFound, err) return } else if !info.IsDir() { @@ -4107,13 +4105,13 @@ func (s *ModuleService) PatchModuleVersionFile(c *gin.Context) { } if form.Path == form.NewPath { - logger.FromContext(c).Errorf("error moving file in RemoteStorage: newpath is identical to path") + logger.FromContext(c).Errorf("error moving file in Client: newpath is identical to path") response.Error(c, 
response.ErrPatchModuleVersionFilePathIdentical, nil) return } if err = s3.Rename(form.Path, form.NewPath); err != nil { - logger.FromContext(c).WithError(err).Errorf("error renaming file in RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error renaming file in Client") response.Error(c, response.ErrPatchModuleVersionFileObjectMoveFail, err) return } @@ -4126,13 +4124,13 @@ func (s *ModuleService) PatchModuleVersionFile(c *gin.Context) { } if form.Path == form.NewPath { - logger.FromContext(c).Errorf("error moving file in RemoteStorage: newpath is identical to path") + logger.FromContext(c).Errorf("error moving file in Client: newpath is identical to path") response.Error(c, response.ErrPatchModuleVersionFilePathIdentical, nil) return } if files, err = s3.ListDirRec(form.Path); err != nil { - logger.FromContext(c).WithError(err).Errorf("error getting files by path from RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error getting files by path from Client") response.Error(c, response.ErrPatchModuleVersionFileGetFilesFail, err) return } @@ -4142,7 +4140,7 @@ func (s *ModuleService) PatchModuleVersionFile(c *gin.Context) { curfile := filepath.Join(form.Path, obj) newfile := filepath.Join(form.NewPath, obj) if err = s3.Rename(curfile, newfile); err != nil { - logger.FromContext(c).WithError(err).Errorf("error moving file in RemoteStorage") + logger.FromContext(c).WithError(err).Errorf("error moving file in Client") response.Error(c, response.ErrPatchModuleVersionFileObjectMoveFail, err) return } @@ -4166,7 +4164,7 @@ func (s *ModuleService) PatchModuleVersionFile(c *gin.Context) { } // GetModuleVersionOption is a function to return option of system module rendered on server side -// @Summary Retrieve rendered Event RemoteStorageConfig Schema of system module data by module name and version +// @Summary Retrieve rendered Event Config Schema of system module data by module name and version // @Tags Modules // @Produce json // @Param 
module_name path string true "module name without spaces" diff --git a/pkg/app/api/server/private/policies.go b/pkg/app/api/server/private/policies.go index 199dc6eb..19ea1878 100644 --- a/pkg/app/api/server/private/policies.go +++ b/pkg/app/api/server/private/policies.go @@ -15,7 +15,7 @@ import ( "soldr/pkg/app/api/storage" useraction "soldr/pkg/app/api/user_action" "soldr/pkg/app/api/utils" - "soldr/pkg/semver" + "soldr/pkg/semvertooling" ) type policyDetails struct { @@ -116,8 +116,8 @@ func getPolicyConsistency(modules []models.ModuleAShort) (bool, []models.PolicyD return false } } - switch semver.CompareVersions(mod.Info.Version.String(), dep.MinModuleVersion) { - case semver.TargetVersionEmpty, semver.VersionsEqual, semver.SourceVersionGreat: + switch semvertooling.CompareVersions(mod.Info.Version.String(), dep.MinModuleVersion) { + case semvertooling.TargetVersionEmpty, semvertooling.VersionsEqual, semvertooling.SourceVersionGreat: return true default: return false diff --git a/pkg/app/api/server/private/upgrades.go b/pkg/app/api/server/private/upgrades.go index 2821f18e..955b1331 100644 --- a/pkg/app/api/server/private/upgrades.go +++ b/pkg/app/api/server/private/upgrades.go @@ -22,7 +22,8 @@ import ( "soldr/pkg/app/api/server/response" storage2 "soldr/pkg/app/api/storage" useraction "soldr/pkg/app/api/user_action" - "soldr/pkg/s3" + "soldr/pkg/filestorage" + "soldr/pkg/filestorage/s3" ) type upgradeAgentDetails struct { @@ -249,14 +250,14 @@ func (s *UpgradeService) CreateAgentsUpgrades(c *gin.Context) { } if sqlInsertResult.RowsAffected != 0 { - s3, err := s3.New(sv.Info.S3.ToS3ConnParams()) + s3Client, err := s3.New(sv.Info.S3.ToS3ConnParams()) if err != nil { logger.FromContext(c).WithError(err).Errorf("error openning connection to RemoteStorage") response.Error(c, response.ErrInternal, err) return } - if err = uploadAgentBinariesToInstBucket(binary, s3); err != nil { + if err = uploadAgentBinariesToInstBucket(binary, s3Client); err != nil { 
logger.FromContext(c).WithError(err).Errorf("error uploading agent binaries to RemoteStorage instance bucket") response.Error(c, response.ErrCreateAgentsUpgradesUpdateAgentBinariesFail, err) return @@ -439,14 +440,14 @@ func (s *UpgradeService) PatchLastAgentUpgrade(c *gin.Context) { return } - s3, err := s3.New(sv.Info.S3.ToS3ConnParams()) + s3Client, err := s3.New(sv.Info.S3.ToS3ConnParams()) if err != nil { logger.FromContext(c).WithError(err).Errorf("error openning connection to RemoteStorage") response.Error(c, response.ErrInternal, err) return } - if err = uploadAgentBinariesToInstBucket(binary, s3); err != nil { + if err = uploadAgentBinariesToInstBucket(binary, s3Client); err != nil { logger.FromContext(c).WithError(err).Errorf("error uploading agent binaries to RemoteStorage instance bucket") response.Error(c, response.ErrPatchLastAgentUpgradeUpdateAgentBinariesFail, err) return @@ -476,7 +477,7 @@ func (s *UpgradeService) PatchLastAgentUpgrade(c *gin.Context) { } // uploadAgentBinariesToInstBucket is function to check and upload agent binaries to RemoteStorage instance bucket -func uploadAgentBinariesToInstBucket(binary models.Binary, iS3 s3.FileStorage) error { +func uploadAgentBinariesToInstBucket(binary models.Binary, iS3 filestorage.Storage) error { joinPath := func(args ...string) string { tpath := filepath.Join(args...) 
return strings.Replace(tpath, "\\", "/", -1) diff --git a/pkg/app/api/storage/mem/connection.go b/pkg/app/api/storage/mem/connection.go index 83943189..72ea56b3 100644 --- a/pkg/app/api/storage/mem/connection.go +++ b/pkg/app/api/storage/mem/connection.go @@ -6,7 +6,7 @@ import ( "github.com/jinzhu/gorm" - "soldr/pkg/s3" + "soldr/pkg/filestorage" ) type DBConnectionStorage struct { @@ -39,16 +39,16 @@ func (s *DBConnectionStorage) Set(hash string, db *gorm.DB) { type S3ConnectionStorage struct { mu sync.RWMutex // protects map below // TODO: store RemoteStorage struct instead of interface - store map[string]s3.FileStorage + store map[string]filestorage.Storage } func NewS3ConnectionStorage() *S3ConnectionStorage { return &S3ConnectionStorage{ - store: make(map[string]s3.FileStorage), + store: make(map[string]filestorage.Storage), } } -func (s *S3ConnectionStorage) Get(hash string) (s3.FileStorage, error) { +func (s *S3ConnectionStorage) Get(hash string) (filestorage.Storage, error) { s.mu.RLock() defer s.mu.RUnlock() conn, found := s.store[hash] @@ -58,7 +58,7 @@ func (s *S3ConnectionStorage) Get(hash string) (s3.FileStorage, error) { return conn, nil } -func (s *S3ConnectionStorage) Set(hash string, s3 s3.FileStorage) { +func (s *S3ConnectionStorage) Set(hash string, s3 filestorage.Storage) { s.mu.Lock() s.store[hash] = s3 s.mu.Unlock() diff --git a/pkg/app/server/certs/config/config.go b/pkg/app/server/certs/config/config.go index 24461b33..e8481a36 100644 --- a/pkg/app/server/certs/config/config.go +++ b/pkg/app/server/certs/config/config.go @@ -1,7 +1,7 @@ package config import ( - "soldr/pkg/s3" + "soldr/pkg/filestorage" ) type Config struct { @@ -9,6 +9,6 @@ type Config struct { } type StaticProvider struct { - Reader s3.FileReader + Reader filestorage.Reader CertsDir string } diff --git a/pkg/app/server/mmodule/hardening/cache/connector.go b/pkg/app/server/mmodule/hardening/cache/connector.go index b6994096..51580806 100644 --- 
a/pkg/app/server/mmodule/hardening/cache/connector.go +++ b/pkg/app/server/mmodule/hardening/cache/connector.go @@ -6,13 +6,13 @@ import ( "github.com/jinzhu/gorm" - "soldr/pkg/s3" + "soldr/pkg/filestorage" ) type ( fetchData func(ctx context.Context, connector interface{}) (interface{}, error) FetchDataFromDB func(ctx context.Context, connector *gorm.DB) (interface{}, error) - FetchDataFromFile func(ctx context.Context, connector s3.FileReader) (interface{}, error) + FetchDataFromFile func(ctx context.Context, connector filestorage.Reader) (interface{}, error) ) type ConnectorParams struct { @@ -48,7 +48,7 @@ func chooseFetcher(connector interface{}, initParams *ConnectorParams) (fetchDat switch connector.(type) { case *gorm.DB: fetch, err = getDBFetcher(initParams.DBFetcher) - case s3.FileReader: + case filestorage.Reader: fetch, err = getFileFetcher(initParams.FileFetcher) default: return nil, fmt.Errorf("a store of an unknown type passed") @@ -84,9 +84,9 @@ func getFileFetcher(fetcher FetchDataFromFile) (fetchData, error) { return nil, fmt.Errorf("fileStore fetcher function has not been implemented") } return func(ctx context.Context, connector interface{}) (interface{}, error) { - fileStore, ok := connector.(s3.FileStorage) + fileStore, ok := connector.(filestorage.Storage) if !ok { - return nil, fmt.Errorf("passed connector in not of the type FileStorage") + return nil, fmt.Errorf("passed connector in not of the type Storage") } if fileStore == nil { return nil, fmt.Errorf("passed connector object is nil") diff --git a/pkg/app/server/mmodule/hardening/conn_validator_factory.go b/pkg/app/server/mmodule/hardening/conn_validator_factory.go index 070aff9a..08df8773 100644 --- a/pkg/app/server/mmodule/hardening/conn_validator_factory.go +++ b/pkg/app/server/mmodule/hardening/conn_validator_factory.go @@ -10,7 +10,7 @@ import ( "soldr/pkg/app/server/certs" "soldr/pkg/app/server/mmodule/hardening/v1/approver" v1Validator 
"soldr/pkg/app/server/mmodule/hardening/v1/validator" - "soldr/pkg/s3" + "soldr/pkg/filestorage" "soldr/pkg/vxproto" ) @@ -21,7 +21,7 @@ type ConnectionValidatorFactory struct { func NewConnectionValidatorFactory( ctx context.Context, gdbc *gorm.DB, - fs s3.FileReader, + fs filestorage.Reader, store interface{}, basePath string, certsProvider certs.Provider, diff --git a/pkg/app/server/mmodule/hardening/v1/abher/abh_list.go b/pkg/app/server/mmodule/hardening/v1/abher/abh_list.go index 59cdc1c7..9a9fdcfd 100644 --- a/pkg/app/server/mmodule/hardening/v1/abher/abh_list.go +++ b/pkg/app/server/mmodule/hardening/v1/abher/abh_list.go @@ -13,7 +13,7 @@ import ( "soldr/pkg/app/api/models" "soldr/pkg/app/server/mmodule/hardening/cache" - "soldr/pkg/s3" + "soldr/pkg/filestorage" "soldr/pkg/vxproto" ) @@ -112,7 +112,7 @@ func (l *ABHList) UnmarshalJSON(data []byte) error { } func getABHListFromFile(basePath string, dst *ABHList) cache.FetchDataFromFile { - return func(ctx context.Context, connector s3.FileReader) (interface{}, error) { + return func(ctx context.Context, connector filestorage.Reader) (interface{}, error) { filePath := path.Join(basePath, "hardening", "abh.json") file, err := connector.ReadFile(filePath) if err != nil { diff --git a/pkg/app/server/mmodule/hardening/v1/sbher/sbher.go b/pkg/app/server/mmodule/hardening/v1/sbher/sbher.go index 5d676634..7e9243df 100644 --- a/pkg/app/server/mmodule/hardening/v1/sbher/sbher.go +++ b/pkg/app/server/mmodule/hardening/v1/sbher/sbher.go @@ -10,7 +10,7 @@ import ( "github.com/sirupsen/logrus" cache2 "soldr/pkg/app/server/mmodule/hardening/cache" - "soldr/pkg/s3" + "soldr/pkg/filestorage" ) type SBH struct { @@ -74,7 +74,7 @@ type SBHFileData map[string][]byte func getFileFetcher(basePath string) cache2.FetchDataFromFile { const sbhFileName = "sbh.json" - return func(ctx context.Context, connector s3.FileReader) (interface{}, error) { + return func(ctx context.Context, connector filestorage.Reader) (interface{}, error) { 
sbhFilePath := path.Join(basePath, "lic", sbhFileName) data, err := connector.ReadFile(sbhFilePath) if err != nil { diff --git a/pkg/app/server/mmodule/hardening/v1/validator/validator.go b/pkg/app/server/mmodule/hardening/v1/validator/validator.go index 0d6a136a..6f8cf1c8 100644 --- a/pkg/app/server/mmodule/hardening/v1/validator/validator.go +++ b/pkg/app/server/mmodule/hardening/v1/validator/validator.go @@ -16,8 +16,8 @@ import ( "soldr/pkg/app/server/mmodule/hardening/v1/ssa" tunnelConfigurer "soldr/pkg/app/server/mmodule/hardening/v1/tunnel" "soldr/pkg/app/server/mmodule/hardening/v1/validator/ainfo" + "soldr/pkg/filestorage" "soldr/pkg/protoagent" - "soldr/pkg/s3" "soldr/pkg/vxproto" "soldr/pkg/vxproto/tunnel" ) @@ -61,7 +61,7 @@ type ConnectionValidator struct { func NewConnectionValidator( ctx context.Context, gdbc *gorm.DB, - fs s3.FileReader, + fs filestorage.Reader, store interface{}, basePath string, certsProvider certs.Provider, diff --git a/pkg/app/server/mmodule/mmodule.go b/pkg/app/server/mmodule/mmodule.go index 6ec5b610..276f8fee 100644 --- a/pkg/app/server/mmodule/mmodule.go +++ b/pkg/app/server/mmodule/mmodule.go @@ -32,11 +32,12 @@ import ( "soldr/pkg/app/server/mmodule/hardening/v1/crypto" "soldr/pkg/app/server/mmodule/upgrader/store" "soldr/pkg/controller" + "soldr/pkg/filestorage" + "soldr/pkg/filestorage/fs" "soldr/pkg/loader" "soldr/pkg/lua" obs "soldr/pkg/observability" "soldr/pkg/protoagent" - "soldr/pkg/s3" "soldr/pkg/system" utilsErrors "soldr/pkg/utils/errors" "soldr/pkg/vxproto" @@ -63,7 +64,7 @@ type MainModule struct { cnt controller.IController gdbc *gorm.DB validator *validator.Validate - store s3.FileStorage + store filestorage.Storage agents *agentList groups *groupList listen string @@ -1808,7 +1809,7 @@ func New( cl controller.IConfigLoader, fl controller.IFilesLoader, gdb *gorm.DB, - store s3.FileStorage, + store filestorage.Storage, certsProvider certs.Provider, version string, connectionValidatorConf 
*hardeningConfig.Validator, @@ -1867,7 +1868,7 @@ func New( return mm, fmt.Errorf("failed to initialize the update task consumer submodule: %w", err) } var validatorStore interface{} - fsStore, err := s3.NewLocalStorage() + fsStore, err := fs.New() if err != nil { return mm, fmt.Errorf("failed to initialize an LocalStorage store: %w", err) } diff --git a/pkg/app/server/mmodule/upgrader/cache/cache.go b/pkg/app/server/mmodule/upgrader/cache/cache.go index 8f113075..1be7a6e9 100644 --- a/pkg/app/server/mmodule/upgrader/cache/cache.go +++ b/pkg/app/server/mmodule/upgrader/cache/cache.go @@ -7,7 +7,7 @@ import ( "sync" "time" - "soldr/pkg/s3" + "soldr/pkg/filestorage" "soldr/pkg/vxproto" ) @@ -45,7 +45,7 @@ type Item struct { } type Cache struct { - store s3.FileStorage + store filestorage.Storage cache map[Key]*Item cacheMux *sync.Mutex tracker *lruTracker @@ -58,7 +58,7 @@ const ( cacheSize = 6 ) -func NewCache(store s3.FileStorage) (*Cache, error) { +func NewCache(store filestorage.Storage) (*Cache, error) { tracker, err := newLRUTracker(cacheSize) if err != nil { return nil, err diff --git a/pkg/controller/config.go b/pkg/controller/config.go index 339e401e..17e4f4dc 100644 --- a/pkg/controller/config.go +++ b/pkg/controller/config.go @@ -4,7 +4,8 @@ import ( "fmt" "soldr/pkg/db" - "soldr/pkg/s3" + "soldr/pkg/filestorage/fs" + "soldr/pkg/filestorage/s3" ) type getCallback func() string @@ -229,7 +230,7 @@ func NewConfigFromDB(dsn *db.DSN) (IConfigLoader, error) { } // NewConfigFromS3 is function which constructed Configuration loader object -func NewConfigFromS3(connParams *s3.RemoteStorageConfig) (IConfigLoader, error) { +func NewConfigFromS3(connParams *s3.Config) (IConfigLoader, error) { sc, err := s3.New(connParams) if err != nil { return nil, generateDriverInitErrMsg(driverTypeS3, err) @@ -242,7 +243,7 @@ func NewConfigFromS3(connParams *s3.RemoteStorageConfig) (IConfigLoader, error) // NewConfigFromFS is function which constructed Configuration loader object 
func NewConfigFromFS(path string) (IConfigLoader, error) { - sc, err := s3.NewLocalStorage() + sc, err := fs.New() if err != nil { return nil, generateDriverInitErrMsg(driverTypeFS, err) } @@ -263,7 +264,7 @@ func generateDriverInitErrMsg(t driverType, originalErr error) error { var driverName string switch t { case driverTypeS3: - driverName = "RemoteStorage" + driverName = "Client" case driverTypeFS: driverName = "LocalStorage" default: diff --git a/pkg/controller/files.go b/pkg/controller/files.go index e0c3101d..b122c59b 100755 --- a/pkg/controller/files.go +++ b/pkg/controller/files.go @@ -1,7 +1,8 @@ package controller import ( - "soldr/pkg/s3" + "soldr/pkg/filestorage/fs" + "soldr/pkg/filestorage/s3" ) // sFiles is universal container for modules files loader @@ -11,7 +12,7 @@ type sFiles struct { } // NewFilesFromS3 is function which constructed Files loader object -func NewFilesFromS3(connParams *s3.RemoteStorageConfig) (IFilesLoader, error) { +func NewFilesFromS3(connParams *s3.Config) (IFilesLoader, error) { sc, err := s3.New(connParams) if err != nil { return nil, generateDriverInitErrMsg(driverTypeS3, err) @@ -24,7 +25,7 @@ func NewFilesFromS3(connParams *s3.RemoteStorageConfig) (IFilesLoader, error) { // NewFilesFromFS is function which constructed Files loader object func NewFilesFromFS(path string) (IFilesLoader, error) { - sc, err := s3.NewLocalStorage() + sc, err := fs.New() if err != nil { return nil, generateDriverInitErrMsg(driverTypeFS, err) } diff --git a/pkg/controller/loader.go b/pkg/controller/loader.go index f0c417f8..daa201c5 100644 --- a/pkg/controller/loader.go +++ b/pkg/controller/loader.go @@ -8,8 +8,8 @@ import ( "time" "soldr/pkg/db" + "soldr/pkg/filestorage" "soldr/pkg/loader" - "soldr/pkg/s3" ) // tConfigLoaderType is type for loading config @@ -165,7 +165,7 @@ func (cl *configLoaderDB) load() ([]*loader.ModuleConfig, error) { return ml, nil } -func readConfig(s s3.FileStorage, path string) ([]*loader.ModuleConfig, error) { +func 
readConfig(s filestorage.Storage, path string) ([]*loader.ModuleConfig, error) { var mcl []*loader.ModuleConfig if s.IsNotExist(path) { return nil, fmt.Errorf("the config directory '%s' not found", path) @@ -184,7 +184,7 @@ func readConfig(s s3.FileStorage, path string) ([]*loader.ModuleConfig, error) { return mcl, nil } -func writeConfig(s s3.FileStorage, path string, mcl []*loader.ModuleConfig) error { +func writeConfig(s filestorage.Storage, path string, mcl []*loader.ModuleConfig) error { if s.IsNotExist(path) { return fmt.Errorf("config directory '%s' not found", path) } @@ -216,7 +216,7 @@ func parsePathToFile(mpath string) (string, string, error) { return mname, bpath, nil } -func getStorageCb(s s3.FileStorage, mpath, file string) getCallback { +func getStorageCb(s filestorage.Storage, mpath, file string) getCallback { return func() string { data, err := s.ReadFile(joinPath(mpath, file)) if err == nil { @@ -227,7 +227,7 @@ func getStorageCb(s s3.FileStorage, mpath, file string) getCallback { } } -func setStorageCb(s s3.FileStorage, mpath, file string) setCallback { +func setStorageCb(s filestorage.Storage, mpath, file string) setCallback { return func(val string) bool { mname, bpath, err := parsePathToFile(mpath) if err != nil { @@ -250,7 +250,7 @@ func setStorageCb(s s3.FileStorage, mpath, file string) setCallback { } } -func loadConfig(s s3.FileStorage, path string) ([]*loader.ModuleConfig, error) { +func loadConfig(s filestorage.Storage, path string) ([]*loader.ModuleConfig, error) { mcl, err := readConfig(s, path) if err != nil { return nil, err @@ -287,7 +287,7 @@ func loadConfig(s s3.FileStorage, path string) ([]*loader.ModuleConfig, error) { // configLoaderS3 is container for config which loaded from D3 type configLoaderS3 struct { - sc s3.FileStorage + sc filestorage.Storage } // load is function what retrieve modules config list from RemoteStorage @@ -298,7 +298,7 @@ func (cl *configLoaderS3) load() ([]*loader.ModuleConfig, error) { // 
configLoaderFS is container for config which loaded from LocalStorage type configLoaderFS struct { path string - sc s3.FileStorage + sc filestorage.Storage } // load is function what retrieve modules config list from LocalStorage @@ -314,7 +314,7 @@ func removeLeadSlash(files map[string][]byte) map[string][]byte { return rfiles } -func loadUtils(s s3.FileStorage, path string) (map[string][]byte, error) { +func loadUtils(s filestorage.Storage, path string) (map[string][]byte, error) { var err error upath := joinPath(path, "utils") if s.IsNotExist(upath) { @@ -330,7 +330,7 @@ func loadUtils(s s3.FileStorage, path string) (map[string][]byte, error) { return files, nil } -func loadFiles(s s3.FileStorage, path string, mcl []*loader.ModuleConfig) ([]*loader.ModuleFiles, error) { +func loadFiles(s filestorage.Storage, path string, mcl []*loader.ModuleConfig) ([]*loader.ModuleFiles, error) { var mfl []*loader.ModuleFiles if s.IsNotExist(path) { return nil, fmt.Errorf("modules directory '%s' not found", path) @@ -388,7 +388,7 @@ func loadFiles(s s3.FileStorage, path string, mcl []*loader.ModuleConfig) ([]*lo // filesLoaderS3 is container for files structure which loaded from RemoteStorage type filesLoaderS3 struct { - sc s3.FileStorage + sc filestorage.Storage } // load is function what retrieve modules files data from RemoteStorage @@ -403,7 +403,7 @@ func (fl *filesLoaderS3) load(mcl []*loader.ModuleConfig) ([]*loader.ModuleFiles // filesLoaderFS is container for files structure which loaded from LocalStorage type filesLoaderFS struct { path string - sc s3.FileStorage + sc filestorage.Storage } // load is function what retrieve modules files data from LocalStorage diff --git a/pkg/s3/structs.go b/pkg/filestorage/file.go similarity index 55% rename from pkg/s3/structs.go rename to pkg/filestorage/file.go index 48b42a11..133208e3 100755 --- a/pkg/s3/structs.go +++ b/pkg/filestorage/file.go @@ -1,13 +1,10 @@ -package s3 +package filestorage import ( "errors" "os" 
"path/filepath" "strings" - "time" - - "github.com/minio/minio-go/v7" ) // Error list of storage package @@ -27,8 +24,8 @@ var ( ErrRenameFailed = errors.New("can't rename") ) -// FileStorage is main interface for using external storages -type FileStorage interface { +// Storage is main interface for using external storages +type Storage interface { ListDir(path string) (map[string]os.FileInfo, error) ListDirRec(path string) (map[string]os.FileInfo, error) GetInfo(path string) (os.FileInfo, error) @@ -45,16 +42,16 @@ type FileStorage interface { Remove(path string) error Rename(old, new string) error CopyFile(src, dst string) error - FileReader - FileLimiter + Reader + Limiter } -type FileReader interface { +type Reader interface { ReadFile(path string) ([]byte, error) } -// FileLimiter is additional interface for limits control -type FileLimiter interface { +// Limiter is additional interface for limits control +type Limiter interface { DefPerm() os.FileMode SetDefPerm(perm os.FileMode) MaxFileSize() int64 @@ -65,92 +62,51 @@ type FileLimiter interface { SetMaxNumObjs(max int64) } -type fileLimits struct { +type Limits struct { defPerm os.FileMode maxFileSize int64 maxReadSize int64 maxNumObjs int64 } -func (l *fileLimits) DefPerm() os.FileMode { +func NewLimits(defPerm os.FileMode, maxFileSize int64, maxReadSize int64, maxNumObjs int64) *Limits { + return &Limits{defPerm: defPerm, maxFileSize: maxFileSize, maxReadSize: maxReadSize, maxNumObjs: maxNumObjs} +} + +func (l *Limits) DefPerm() os.FileMode { return l.defPerm } -func (l *fileLimits) SetDefPerm(perm os.FileMode) { +func (l *Limits) SetDefPerm(perm os.FileMode) { l.defPerm = perm } -func (l *fileLimits) MaxFileSize() int64 { +func (l *Limits) MaxFileSize() int64 { return l.maxFileSize } -func (l *fileLimits) SetMaxFileSize(max int64) { +func (l *Limits) SetMaxFileSize(max int64) { l.maxFileSize = max } -func (l *fileLimits) MaxReadSize() int64 { +func (l *Limits) MaxReadSize() int64 { return l.maxReadSize 
} -func (l *fileLimits) SetMaxReadSize(max int64) { +func (l *Limits) SetMaxReadSize(max int64) { l.maxReadSize = max } -func (l *fileLimits) MaxNumObjs() int64 { +func (l *Limits) MaxNumObjs() int64 { return l.maxNumObjs } -func (l *fileLimits) SetMaxNumObjs(max int64) { +func (l *Limits) SetMaxNumObjs(max int64) { l.maxNumObjs = max } -// FileInfo is struct with interface os.FileInfo -type FileInfo struct { - isDir bool - path string - *minio.ObjectInfo -} - -// Name is function that return file name -func (f *FileInfo) Name() string { - return f.path -} - -// Size is function that return file size -func (f *FileInfo) Size() int64 { - if f.ObjectInfo == nil { - return 0 - } - - return f.ObjectInfo.Size -} - -// Mode is function that return file mod structure -func (f *FileInfo) Mode() os.FileMode { - return 0644 -} - -// ModTime is function that return last modification time -func (f *FileInfo) ModTime() time.Time { - if f.ObjectInfo == nil { - return time.Now() - } - - return f.ObjectInfo.LastModified -} - -// IsDir is function that return true if it's directory -func (f *FileInfo) IsDir() bool { - return f.isDir -} - -// Sys is function that return dummy info -func (f *FileInfo) Sys() interface{} { - return nil -} - -// normPath is function that return path which was normalization -func normPath(path string) string { +// NormPath is function that return path which was normalization +func NormPath(path string) string { if path != "" { path = strings.Replace(filepath.Clean(path), "\\", "/", -1) } diff --git a/pkg/s3/fs.go b/pkg/filestorage/fs/fs.go similarity index 72% rename from pkg/s3/fs.go rename to pkg/filestorage/fs/fs.go index 7a7a8378..c0c1f3d4 100755 --- a/pkg/s3/fs.go +++ b/pkg/filestorage/fs/fs.go @@ -1,4 +1,4 @@ -package s3 +package fs import ( "io" @@ -6,22 +6,24 @@ import ( "os" "path/filepath" "strings" + + "soldr/pkg/filestorage" ) // LocalStorage is main class for LocalStorage API type LocalStorage struct { - fileLimits + *filestorage.Limits } 
-// NewLocalStorage is function that construct LocalStorage driver with FileStorage -func NewLocalStorage() (*LocalStorage, error) { +// New is function that construct LocalStorage driver with Storage +func New() (*LocalStorage, error) { return &LocalStorage{ - fileLimits: fileLimits{ - defPerm: 0644, - maxFileSize: 1024 * 1024 * 1024, - maxReadSize: 1024 * 1024 * 1024, - maxNumObjs: 1024 * 1024, - }, + Limits: filestorage.NewLimits( + 0644, + 1024*1024*1024, + 1024*1024*1024, + 1024*1024, + ), }, nil } @@ -30,7 +32,7 @@ func NewLocalStorage() (*LocalStorage, error) { func (s *LocalStorage) ListDir(path string) (map[string]os.FileInfo, error) { files, err := ioutil.ReadDir(path) if err != nil { - return nil, ErrListFailed + return nil, filestorage.ErrListFailed } var numObjs int64 @@ -38,8 +40,8 @@ func (s *LocalStorage) ListDir(path string) (map[string]os.FileInfo, error) { for _, f := range files { tree["/"+f.Name()] = f numObjs++ - if numObjs >= s.maxNumObjs { - return nil, ErrLimitExceeded + if numObjs >= s.Limits.MaxNumObjs() { + return nil, filestorage.ErrLimitExceeded } } @@ -50,11 +52,11 @@ func (s *LocalStorage) ListDir(path string) (map[string]os.FileInfo, error) { func (s *LocalStorage) addFile(base string, tree map[string]os.FileInfo) filepath.WalkFunc { var numObjs int64 return func(path string, info os.FileInfo, err error) error { - if err != nil || numObjs >= s.maxNumObjs { + if err != nil || numObjs >= s.Limits.MaxNumObjs() { return nil } - tree[strings.TrimPrefix(normPath(path), base)] = info + tree[strings.TrimPrefix(filestorage.NormPath(path), base)] = info numObjs++ return nil @@ -65,12 +67,12 @@ func (s *LocalStorage) addFile(base string, tree map[string]os.FileInfo) filepat // Return key in the map is relative path from input base path. 
func (s *LocalStorage) ListDirRec(path string) (map[string]os.FileInfo, error) { tree := make(map[string]os.FileInfo) - err := filepath.Walk(path, s.addFile(normPath(path), tree)) + err := filepath.Walk(path, s.addFile(filestorage.NormPath(path), tree)) if err != nil { - return nil, ErrListFailed + return nil, filestorage.ErrListFailed } - if int64(len(tree)) >= s.maxNumObjs { - return nil, ErrLimitExceeded + if int64(len(tree)) >= s.Limits.MaxNumObjs() { + return nil, filestorage.ErrLimitExceeded } return tree, nil @@ -80,10 +82,10 @@ func (s *LocalStorage) ListDirRec(path string) (map[string]os.FileInfo, error) { func (s *LocalStorage) GetInfo(path string) (os.FileInfo, error) { info, err := os.Stat(path) if os.IsNotExist(err) { - return nil, ErrNotFound + return nil, filestorage.ErrNotFound } if err != nil { - return nil, ErrOpenFailed + return nil, filestorage.ErrOpenFailed } return info, nil @@ -108,15 +110,15 @@ func (s *LocalStorage) ReadFile(path string) ([]byte, error) { if info, err := s.GetInfo(path); err != nil { return nil, err } else if info.IsDir() { - return nil, ErrNotFound - } else if info.Size() > s.maxFileSize { - return nil, ErrLimitExceeded + return nil, filestorage.ErrNotFound + } else if info.Size() > s.Limits.MaxFileSize() { + return nil, filestorage.ErrLimitExceeded } if data, err := ioutil.ReadFile(path); err == nil { return data, nil } - return nil, ErrReadFailed + return nil, filestorage.ErrReadFailed } // readFiles is additional function for reading files data by path @@ -135,8 +137,8 @@ func (s *LocalStorage) readFiles(files map[string]os.FileInfo, base string) (map } numObjs++ readSize += info.Size() - if numObjs >= s.maxNumObjs || readSize >= s.maxReadSize { - return nil, ErrLimitExceeded + if numObjs >= s.Limits.MaxNumObjs() || readSize >= s.Limits.MaxReadSize() { + return nil, filestorage.ErrLimitExceeded } } @@ -166,13 +168,13 @@ func (s *LocalStorage) ReadDirRec(path string) (map[string][]byte, error) { // CreateDir is function 
for create new directory if not exists func (s *LocalStorage) CreateDir(path string) error { if !s.IsExist(path) { - if os.Mkdir(path, s.defPerm) != nil { - return ErrCreateFailed + if os.Mkdir(path, s.Limits.DefPerm()) != nil { + return filestorage.ErrCreateFailed } return nil } - return ErrAlreadyExists + return filestorage.ErrAlreadyExists } // CreateFile is function for create new file if not exists @@ -180,31 +182,31 @@ func (s *LocalStorage) CreateFile(path string) error { if !s.IsExist(path) { file, err := os.Create(path) if err != nil { - return ErrCreateFailed + return filestorage.ErrCreateFailed } if file.Close() != nil { - return ErrCloseFailed + return filestorage.ErrCloseFailed } return nil } - return ErrAlreadyExists + return filestorage.ErrAlreadyExists } // WriteFile is function that write (override) data to a file func (s *LocalStorage) WriteFile(path string, data []byte) error { - file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, s.defPerm) + file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, s.Limits.DefPerm()) if err != nil { - return ErrOpenFailed + return filestorage.ErrOpenFailed } if file.Truncate(0) != nil { - return ErrWriteFailed + return filestorage.ErrWriteFailed } if _, err := file.Write(data); err != nil { - return ErrWriteFailed + return filestorage.ErrWriteFailed } if file.Close() != nil { - return ErrCloseFailed + return filestorage.ErrCloseFailed } return nil @@ -212,15 +214,15 @@ func (s *LocalStorage) WriteFile(path string, data []byte) error { // AppendFile is function that append data to an exist file func (s *LocalStorage) AppendFile(path string, data []byte) error { - file, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, s.defPerm) + file, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, s.Limits.DefPerm()) if err != nil { - return ErrOpenFailed + return filestorage.ErrOpenFailed } if _, err := file.Write(data); err != nil { - return ErrWriteFailed + return filestorage.ErrWriteFailed } if 
file.Close() != nil { - return ErrCloseFailed + return filestorage.ErrCloseFailed } return nil @@ -231,7 +233,7 @@ func (s *LocalStorage) RemoveDir(path string) error { info, err := s.GetInfo(path) if err == nil && info.IsDir() { if os.RemoveAll(path) != nil { - return ErrRemoveFailed + return filestorage.ErrRemoveFailed } return nil } @@ -244,7 +246,7 @@ func (s *LocalStorage) RemoveFile(path string) error { info, err := s.GetInfo(path) if err == nil && !info.IsDir() { if os.Remove(path) != nil { - return ErrRemoveFailed + return filestorage.ErrRemoveFailed } return nil } @@ -255,7 +257,7 @@ func (s *LocalStorage) RemoveFile(path string) error { // Remove is function that remove any exist object func (s *LocalStorage) Remove(path string) error { if os.RemoveAll(path) != nil { - return ErrRemoveFailed + return filestorage.ErrRemoveFailed } return nil @@ -264,7 +266,7 @@ func (s *LocalStorage) Remove(path string) error { // Rename is function that rename any exist object to new func (s *LocalStorage) Rename(src, dst string) error { if os.Rename(src, dst) != nil { - return ErrRenameFailed + return filestorage.ErrRenameFailed } return nil @@ -278,16 +280,16 @@ func (s *LocalStorage) CopyFile(src, dst string) error { return err } if !sfi.Mode().IsRegular() { - return ErrReadFailed + return filestorage.ErrReadFailed } dfi, err := s.GetInfo(dst) if err != nil { - if err != ErrNotFound { + if err != filestorage.ErrNotFound { return err } } else { if !dfi.Mode().IsRegular() { - return ErrWriteFailed + return filestorage.ErrWriteFailed } if os.SameFile(sfi, dfi) { return nil @@ -296,23 +298,23 @@ func (s *LocalStorage) CopyFile(src, dst string) error { in, err := os.Open(src) if err != nil { - return ErrOpenFailed + return filestorage.ErrOpenFailed } defer in.Close() out, err := os.Create(dst) if err != nil { - return ErrCreateFailed + return filestorage.ErrCreateFailed } defer func() { if out.Close() != nil { - err = ErrCloseFailed + err = filestorage.ErrCloseFailed } }() 
if _, err = io.Copy(out, in); err != nil { - return ErrCopyFailed + return filestorage.ErrCopyFailed } if out.Sync() != nil { - return ErrCopyFailed + return filestorage.ErrCopyFailed } return err diff --git a/pkg/s3/s3.go b/pkg/filestorage/s3/s3.go similarity index 65% rename from pkg/s3/s3.go rename to pkg/filestorage/s3/s3.go index b12034d3..44765098 100755 --- a/pkg/s3/s3.go +++ b/pkg/filestorage/s3/s3.go @@ -9,13 +9,16 @@ import ( "os" "path/filepath" "strings" + "time" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" + + "soldr/pkg/filestorage" ) -// RemoteStorage is main class for RemoteStorage API -type RemoteStorage struct { +// Client is main class for Client API +type Client struct { endpoint string accessKey string secretKey string @@ -23,18 +26,18 @@ type RemoteStorage struct { host string isSSL bool con *minio.Client - fileLimits + *filestorage.Limits } -type RemoteStorageConfig struct { +type Config struct { Endpoint string AccessKey string SecretKey string BucketName string } -// NewRemoteStorage is function that construct RemoteStorage driver with FileStorage -func NewRemoteStorage(cfg *RemoteStorageConfig) (*RemoteStorage, error) { +// New is function that construct Client driver with Storage +func New(cfg *Config) (*Client, error) { if cfg == nil { var err error cfg, err = getConnParamsFromEnvVars() @@ -42,13 +45,13 @@ func NewRemoteStorage(cfg *RemoteStorageConfig) (*RemoteStorage, error) { return nil, err } } - s := &RemoteStorage{ - fileLimits: fileLimits{ - defPerm: 0644, - maxFileSize: 1024 * 1024 * 1024, - maxReadSize: 1024 * 1024 * 1024, - maxNumObjs: 1024 * 1024, - }, + s := &Client{ + Limits: filestorage.NewLimits( + 0644, + 1024*1024*1024, + 1024*1024*1024, + 1024*1024, + ), endpoint: cfg.Endpoint, accessKey: cfg.AccessKey, secretKey: cfg.SecretKey, @@ -59,7 +62,7 @@ func NewRemoteStorage(cfg *RemoteStorageConfig) (*RemoteStorage, error) { s.isSSL = stURL.Scheme == "https" s.host = stURL.Host } else { - 
return nil, ErrInternal + return nil, filestorage.ErrInternal } var err error @@ -68,14 +71,14 @@ func NewRemoteStorage(cfg *RemoteStorageConfig) (*RemoteStorage, error) { Secure: s.isSSL, }) if err != nil { - return nil, ErrInternal + return nil, filestorage.ErrInternal } return s, nil } -func getConnParamsFromEnvVars() (*RemoteStorageConfig, error) { - params := &RemoteStorageConfig{} +func getConnParamsFromEnvVars() (*Config, error) { + params := &Config{} var ok bool genErr := func(missingEnvVar string) error { return fmt.Errorf("environment variable %s is undefined", missingEnvVar) @@ -102,19 +105,19 @@ func getConnParamsFromEnvVars() (*RemoteStorageConfig, error) { } // ListDir is function that return listing directory with filea info -func (c *RemoteStorage) ListDir(path string) (map[string]os.FileInfo, error) { +func (c *Client) ListDir(path string) (map[string]os.FileInfo, error) { var numObjs int64 tree := make(map[string]os.FileInfo) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - path = normPath(path) + path = filestorage.NormPath(path) objectCh := c.con.ListObjects(ctx, c.bucketName, minio.ListObjectsOptions{ Prefix: path, Recursive: true, }) for object := range objectCh { if object.Err != nil { - return nil, ErrListFailed + return nil, filestorage.ErrListFailed } shortPath := strings.TrimPrefix(object.Key, path) if !strings.HasPrefix(shortPath, "/") { @@ -139,8 +142,8 @@ func (c *RemoteStorage) ListDir(path string) (map[string]os.FileInfo, error) { numObjs++ } } - if numObjs >= c.maxNumObjs { - return nil, ErrLimitExceeded + if numObjs >= c.Limits.MaxNumObjs() { + return nil, filestorage.ErrLimitExceeded } } @@ -148,14 +151,14 @@ func (c *RemoteStorage) ListDir(path string) (map[string]os.FileInfo, error) { } // addFile is additional function for walking on directory and return back info -func (c *RemoteStorage) addFile(base string, tree map[string]os.FileInfo, numObjs *int64) error { +func (c *Client) addFile(base string, 
tree map[string]os.FileInfo, numObjs *int64) error { ttree, err := c.ListDir(base) if err != nil { return err } for path, info := range ttree { - npath := normPath(base + path) + npath := filestorage.NormPath(base + path) if _, ok := tree[npath]; !ok { tree[npath] = info (*numObjs)++ @@ -164,8 +167,8 @@ func (c *RemoteStorage) addFile(base string, tree map[string]os.FileInfo, numObj return err } } - if *numObjs >= c.maxNumObjs { - return ErrLimitExceeded + if *numObjs >= c.Limits.MaxNumObjs() { + return filestorage.ErrLimitExceeded } } } @@ -174,10 +177,10 @@ func (c *RemoteStorage) addFile(base string, tree map[string]os.FileInfo, numObj } // ListDirRec is function that return listing directory with filea info -func (c *RemoteStorage) ListDirRec(path string) (map[string]os.FileInfo, error) { +func (c *Client) ListDirRec(path string) (map[string]os.FileInfo, error) { var numObjs int64 tree := make(map[string]os.FileInfo) - path = normPath(path) + path = filestorage.NormPath(path) if err := c.addFile(path, tree, &numObjs); err != nil { return nil, err } @@ -194,15 +197,15 @@ func (c *RemoteStorage) ListDirRec(path string) (map[string]os.FileInfo, error) } // GetInfo is function that return file info -func (c *RemoteStorage) GetInfo(path string) (os.FileInfo, error) { - path = normPath(path) +func (c *Client) GetInfo(path string) (os.FileInfo, error) { + path = filestorage.NormPath(path) ctx, cancel := context.WithCancel(context.Background()) defer cancel() objInfo, err := c.con.StatObject(ctx, c.bucketName, path, minio.StatObjectOptions{}) if err != nil { tree, err := c.ListDir(path) if err != nil || len(tree) == 0 { - return nil, ErrNotFound + return nil, filestorage.ErrNotFound } return &FileInfo{ isDir: true, @@ -219,7 +222,7 @@ func (c *RemoteStorage) GetInfo(path string) (os.FileInfo, error) { } // IsExist is function that return true if file exists -func (c *RemoteStorage) IsExist(path string) bool { +func (c *Client) IsExist(path string) bool { if _, err := 
c.GetInfo(path); err != nil { return false } @@ -228,42 +231,42 @@ } // IsNotExist is function that return true if file not exists -func (c *RemoteStorage) IsNotExist(path string) bool { +func (c *Client) IsNotExist(path string) bool { return !c.IsExist(path) } // ReadFile is function that return the file data -func (c *RemoteStorage) ReadFile(path string) ([]byte, error) { +func (c *Client) ReadFile(path string) ([]byte, error) { var objData []byte var objInfo minio.ObjectInfo if info, err := c.GetInfo(path); err != nil { return nil, err } else if info.IsDir() { - return nil, ErrNotFound - } else if info.Size() > c.maxFileSize { - return nil, ErrLimitExceeded + return nil, filestorage.ErrNotFound + } else if info.Size() > c.Limits.MaxFileSize() { + return nil, filestorage.ErrLimitExceeded } ctx, cancel := context.WithCancel(context.Background()) defer cancel() obj, err := c.con.GetObject(ctx, c.bucketName, path, minio.GetObjectOptions{}) if err != nil { - return nil, fmt.Errorf("%w: %v", ErrReadFailed, err) + return nil, fmt.Errorf("%w: %v", filestorage.ErrReadFailed, err) } defer obj.Close() if objInfo, err = obj.Stat(); err != nil { - return nil, fmt.Errorf("%w: %v", ErrReadFailed, err) + return nil, fmt.Errorf("%w: %v", filestorage.ErrReadFailed, err) } objData = make([]byte, objInfo.Size) if n, err := io.ReadFull(obj, objData); err != nil || n != int(objInfo.Size) { - return nil, fmt.Errorf("%w: %v", ErrReadFailed, err) + return nil, fmt.Errorf("%w: %v", filestorage.ErrReadFailed, err) } return objData, nil } // readFiles is additional function for reading files data by path -func (c *RemoteStorage) readFiles(files map[string]os.FileInfo, base string) (map[string][]byte, error) { +func (c *Client) readFiles(files map[string]os.FileInfo, base string) (map[string][]byte, error) { var err error var numObjs, readSize int64 tree := make(map[string][]byte) @@ -272,14 +275,14 @@ func (c *RemoteStorage) 
readFiles(files map[string]os.FileInfo, base string) (ma continue } - fpath := normPath(base + name) + fpath := filestorage.NormPath(base + name) if tree[name], err = c.ReadFile(fpath); err != nil { return nil, err } numObjs++ readSize += info.Size() - if numObjs >= c.maxNumObjs || readSize >= c.maxReadSize { - return nil, ErrLimitExceeded + if numObjs >= c.Limits.MaxNumObjs() || readSize >= c.Limits.MaxReadSize() { + return nil, filestorage.ErrLimitExceeded } } @@ -287,8 +290,8 @@ func (c *RemoteStorage) readFiles(files map[string]os.FileInfo, base string) (ma } // ReadDir is function that read all files in the directory -func (c *RemoteStorage) ReadDir(path string) (map[string][]byte, error) { - path = normPath(path) +func (c *Client) ReadDir(path string) (map[string][]byte, error) { + path = filestorage.NormPath(path) files, err := c.ListDir(path) if err != nil { return nil, err @@ -298,8 +301,8 @@ func (c *RemoteStorage) ReadDir(path string) (map[string][]byte, error) { } // ReadDirRec is function that recursive read all files in the directory -func (c *RemoteStorage) ReadDirRec(path string) (map[string][]byte, error) { - path = normPath(path) +func (c *Client) ReadDirRec(path string) (map[string][]byte, error) { + path = filestorage.NormPath(path) files, err := c.ListDirRec(path) if err != nil { return nil, err @@ -309,18 +312,18 @@ func (c *RemoteStorage) ReadDirRec(path string) (map[string][]byte, error) { } // CreateDir is function for create new directory if not exists -func (c *RemoteStorage) CreateDir(path string) error { - path = normPath(path) +func (c *Client) CreateDir(path string) error { + path = filestorage.NormPath(path) if !c.IsExist(path) { return nil } - return ErrAlreadyExists + return filestorage.ErrAlreadyExists } // CreateFile is function for create new file if not exists -func (c *RemoteStorage) CreateFile(path string) error { - path = normPath(path) +func (c *Client) CreateFile(path string) error { + path = filestorage.NormPath(path) if 
!c.IsExist(path) { r := bytes.NewReader([]byte{}) ctx, cancel := context.WithCancel(context.Background()) @@ -328,31 +331,31 @@ func (c *RemoteStorage) CreateFile(path string) error { _, err := c.con.PutObject(ctx, c.bucketName, path[1:], r, 0, minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { - return ErrCreateFailed + return filestorage.ErrCreateFailed } return nil } - return ErrAlreadyExists + return filestorage.ErrAlreadyExists } // WriteFile is function that write (override) data to a file -func (c *RemoteStorage) WriteFile(path string, data []byte) error { - path = normPath(path) +func (c *Client) WriteFile(path string, data []byte) error { + path = filestorage.NormPath(path) r := bytes.NewReader(data) ctx, cancel := context.WithCancel(context.Background()) defer cancel() _, err := c.con.PutObject(ctx, c.bucketName, path[1:], r, r.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { - return ErrWriteFailed + return filestorage.ErrWriteFailed } return nil } // AppendFile is function that append data to an exist file -func (c *RemoteStorage) AppendFile(path string, data []byte) error { +func (c *Client) AppendFile(path string, data []byte) error { rdata, err := c.ReadFile(path) if err != nil { return err @@ -362,13 +365,13 @@ func (c *RemoteStorage) AppendFile(path string, data []byte) error { } // RemoveDir is function that remove an exist directory -func (c *RemoteStorage) RemoveDir(path string) error { - path = normPath(path) +func (c *Client) RemoveDir(path string) error { + path = filestorage.NormPath(path) info, err := c.GetInfo(path) if err == nil && info.IsDir() { files, err := c.ListDirRec(path) if err != nil { - return ErrRemoveFailed + return filestorage.ErrRemoveFailed } for fpath, info := range files { if !info.IsDir() { @@ -376,7 +379,7 @@ func (c *RemoteStorage) RemoveDir(path string) error { defer cancel() if c.con.RemoveObject(ctx, c.bucketName, path+fpath, 
minio.RemoveObjectOptions{}) != nil { - return ErrRemoveFailed + return filestorage.ErrRemoveFailed } } } @@ -387,15 +390,15 @@ func (c *RemoteStorage) RemoveDir(path string) error { } // RemoveFile is function that remove an exist file -func (c *RemoteStorage) RemoveFile(path string) error { - path = normPath(path) +func (c *Client) RemoveFile(path string) error { + path = filestorage.NormPath(path) info, err := c.GetInfo(path) if err == nil && !info.IsDir() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() if c.con.RemoveObject(ctx, c.bucketName, path, minio.RemoveObjectOptions{}) != nil { - return ErrRemoveFailed + return filestorage.ErrRemoveFailed } return nil } @@ -404,7 +407,7 @@ func (c *RemoteStorage) RemoveFile(path string) error { } // Remove is function that remove any exist object -func (c *RemoteStorage) Remove(path string) error { +func (c *Client) Remove(path string) error { info, err := c.GetInfo(path) if err == nil && !info.IsDir() { return c.RemoveFile(path) @@ -412,45 +415,90 @@ func (c *RemoteStorage) Remove(path string) error { return c.RemoveDir(path) } - return ErrRemoveFailed + return filestorage.ErrRemoveFailed } // Rename is function that rename any exist object to new -func (c *RemoteStorage) Rename(src, dst string) error { +func (c *Client) Rename(src, dst string) error { if err := c.CopyFile(src, dst); err != nil { return err } if c.RemoveFile(src) != nil { - return ErrRenameFailed + return filestorage.ErrRenameFailed } return nil } // CopyFile is function that copies a file from src to dst -func (c *RemoteStorage) CopyFile(src, dst string) error { +func (c *Client) CopyFile(src, dst string) error { isrc, err := c.GetInfo(src) if err != nil || isrc.IsDir() { - return ErrNotFound + return filestorage.ErrNotFound } if c.IsExist(dst) { - return ErrAlreadyExists + return filestorage.ErrAlreadyExists } nsrc := minio.CopySrcOptions{ Bucket: c.bucketName, - Object: normPath(src)[1:], + Object: 
filestorage.NormPath(src)[1:], } ndst := minio.CopyDestOptions{ Bucket: c.bucketName, - Object: normPath(dst)[1:], + Object: filestorage.NormPath(dst)[1:], } ctx, cancel := context.WithCancel(context.Background()) defer cancel() if _, err = c.con.CopyObject(ctx, ndst, nsrc); err != nil { - return ErrCopyFailed + return filestorage.ErrCopyFailed + } + + return nil +} + +// FileInfo is struct with interface os.FileInfo +type FileInfo struct { + isDir bool + path string + *minio.ObjectInfo +} + +// Name is function that return file name +func (f *FileInfo) Name() string { + return f.path +} + +// Size is function that return file size +func (f *FileInfo) Size() int64 { + if f.ObjectInfo == nil { + return 0 } + return f.ObjectInfo.Size +} + +// Mode is function that return file mod structure +func (f *FileInfo) Mode() os.FileMode { + return 0644 +} + +// ModTime is function that return last modification time +func (f *FileInfo) ModTime() time.Time { + if f.ObjectInfo == nil { + return time.Now() + } + + return f.ObjectInfo.LastModified +} + +// IsDir is function that return true if it's directory +func (f *FileInfo) IsDir() bool { + return f.isDir +} + +// Sys is function that return dummy info +func (f *FileInfo) Sys() interface{} { return nil } diff --git a/pkg/log/log.go b/pkg/logtooling/log.go similarity index 96% rename from pkg/log/log.go rename to pkg/logtooling/log.go index 98ce849f..bba7b98a 100644 --- a/pkg/log/log.go +++ b/pkg/logtooling/log.go @@ -1,4 +1,4 @@ -package log +package logtooling import ( "strings" diff --git a/pkg/semver/semver.go b/pkg/semvertooling/semver.go similarity index 98% rename from pkg/semver/semver.go rename to pkg/semvertooling/semver.go index a1c68d71..b36d400b 100644 --- a/pkg/semver/semver.go +++ b/pkg/semvertooling/semver.go @@ -1,4 +1,4 @@ -package semver +package semvertooling import ( "strings" diff --git a/pkg/vxproto/proto.go b/pkg/vxproto/proto.go index e6af0c1f..b5e7b899 100644 --- a/pkg/vxproto/proto.go +++ 
b/pkg/vxproto/proto.go @@ -21,7 +21,7 @@ import ( "github.com/sirupsen/logrus" - "soldr/pkg/s3" + "soldr/pkg/filestorage/fs" "soldr/pkg/system" "soldr/pkg/vxproto/tunnel" ) @@ -222,7 +222,7 @@ func New(mmodule IMainModule) (IVXProto, error) { tokenKey = append(tokenKey, tokenKey[:8]...) // VXStore files rotation - vxstore, err := s3.NewLocalStorage() + vxstore, err := fs.New() if err != nil { return nil, fmt.Errorf("failed to create a vxstore handler: %w", err) }