diff --git a/commands/databases.go b/commands/databases.go index d9e31b8dd..e0970054d 100644 --- a/commands/databases.go +++ b/commands/databases.go @@ -2444,6 +2444,7 @@ This command functions as a PATCH request, meaning that only the specified field displayerType(&displayers.RedisConfiguration{}), displayerType(&displayers.MongoDBConfiguration{}), displayerType(&displayers.KafkaConfiguration{}), + displayerType(&displayers.OpensearchConfiguration{}), ) AddStringFlag( getDatabaseCfgCommand, @@ -2498,14 +2499,15 @@ func RunDatabaseConfigurationGet(c *CmdConfig) error { } allowedEngines := map[string]any{ - "mysql": nil, - "pg": nil, - "redis": nil, - "mongodb": nil, - "kafka": nil, + "mysql": nil, + "pg": nil, + "redis": nil, + "mongodb": nil, + "kafka": nil, + "opensearch": nil, } if _, ok := allowedEngines[engine]; !ok { - return fmt.Errorf("(%s) command: engine must be one of: 'pg', 'mysql', 'redis', 'mongodb', 'kafka'", c.NS) + return fmt.Errorf("(%s) command: engine must be one of: 'pg', 'mysql', 'redis', 'mongodb', 'kafka', 'opensearch'", c.NS) } dbId := args[0] @@ -2559,6 +2561,16 @@ func RunDatabaseConfigurationGet(c *CmdConfig) error { KafkaConfig: *config, } return c.Display(&displayer) + } else if engine == "opensearch" { + config, err := c.Databases().GetOpensearchConfiguration(dbId) + if err != nil { + return err + } + + displayer := displayers.OpensearchConfiguration{ + OpensearchConfig: *config, + } + return c.Display(&displayer) } return nil @@ -2579,14 +2591,15 @@ func RunDatabaseConfigurationUpdate(c *CmdConfig) error { } allowedEngines := map[string]any{ - "mysql": nil, - "pg": nil, - "redis": nil, - "mongodb": nil, - "kafka": nil, + "mysql": nil, + "pg": nil, + "redis": nil, + "mongodb": nil, + "kafka": nil, + "opensearch": nil, } if _, ok := allowedEngines[engine]; !ok { - return fmt.Errorf("(%s) command: engine must be one of: 'pg', 'mysql', 'redis', 'mongodb', 'kafka'", c.NS) + return fmt.Errorf("(%s) command: engine must be one of: 'pg', 
'mysql', 'redis', 'mongodb', 'kafka', 'opensearch'", c.NS) } configJson, err := c.Doit.GetString(c.NS, doctl.ArgDatabaseConfigJson) @@ -2620,6 +2633,11 @@ func RunDatabaseConfigurationUpdate(c *CmdConfig) error { if err != nil { return err } + } else if engine == "opensearch" { + err := c.Databases().UpdateOpensearchConfiguration(dbId, configJson) + if err != nil { + return err + } } return nil diff --git a/commands/databases_test.go b/commands/databases_test.go index 05594019e..e9cb1c50c 100644 --- a/commands/databases_test.go +++ b/commands/databases_test.go @@ -219,6 +219,10 @@ var ( KafkaConfig: &godo.KafkaConfig{}, } + testOpensearchConfiguration = do.OpensearchConfig{ + OpensearchConfig: &godo.OpensearchConfig{}, + } + topicReplicationFactor = uint32(3) testKafkaTopic = do.DatabaseTopic{ DatabaseTopic: &godo.DatabaseTopic{ @@ -1680,6 +1684,16 @@ func TestDatabaseConfigurationGet(t *testing.T) { assert.NoError(t, err) }) + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + tm.databases.EXPECT().GetOpensearchConfiguration(testDBCluster.ID).Return(&testOpensearchConfiguration, nil) + config.Args = append(config.Args, testDBCluster.ID) + config.Doit.Set(config.NS, doctl.ArgDatabaseEngine, "opensearch") + + err := RunDatabaseConfigurationGet(config) + + assert.NoError(t, err) + }) + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { err := RunDatabaseConfigurationGet(config) @@ -1754,6 +1768,16 @@ func TestDatabaseConfigurationUpdate(t *testing.T) { assert.NoError(t, err) }) + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + tm.databases.EXPECT().UpdateOpensearchConfiguration(testDBCluster.ID, "").Return(nil) + config.Args = append(config.Args, testDBCluster.ID) + config.Doit.Set(config.NS, doctl.ArgDatabaseEngine, "opensearch") + + err := RunDatabaseConfigurationUpdate(config) + + assert.NoError(t, err) + }) + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { err := RunDatabaseConfigurationUpdate(config) diff --git 
a/commands/displayers/database.go b/commands/displayers/database.go index 033c8cb69..169d8a47c 100644 --- a/commands/displayers/database.go +++ b/commands/displayers/database.go @@ -1872,6 +1872,265 @@ func (dc *KafkaConfiguration) KV() []map[string]any { return o } +type OpensearchConfiguration struct { + OpensearchConfig do.OpensearchConfig +} + +var _ Displayable = &OpensearchConfiguration{} + +func (dc *OpensearchConfiguration) JSON(out io.Writer) error { + return writeJSON(dc.OpensearchConfig, out) +} + +func (dc *OpensearchConfiguration) Cols() []string { + return []string{ + "key", + "value", + } +} + +func (dc *OpensearchConfiguration) ColMap() map[string]string { + return map[string]string{ + "key": "key", + "value": "value", + } +} + +func (dc *OpensearchConfiguration) KV() []map[string]any { + c := dc.OpensearchConfig + o := []map[string]any{} + if c.HttpMaxContentLengthBytes != nil { + o = append(o, map[string]any{ + "key": "HttpMaxContentLengthBytes", + "value": *c.HttpMaxContentLengthBytes, + }) + } + if c.HttpMaxHeaderSizeBytes != nil { + o = append(o, map[string]any{ + "key": "HttpMaxHeaderSizeBytes", + "value": *c.HttpMaxHeaderSizeBytes, + }) + } + if c.HttpMaxInitialLineLengthBytes != nil { + o = append(o, map[string]any{ + "key": "HttpMaxInitialLineLengthBytes", + "value": *c.HttpMaxInitialLineLengthBytes, + }) + } + if c.IndicesQueryBoolMaxClauseCount != nil { + o = append(o, map[string]any{ + "key": "IndicesQueryBoolMaxClauseCount", + "value": *c.IndicesQueryBoolMaxClauseCount, + }) + } + if c.IndicesFielddataCacheSizePercentage != nil { + o = append(o, map[string]any{ + "key": "IndicesFielddataCacheSizePercentage", + "value": *c.IndicesFielddataCacheSizePercentage, + }) + } + if c.IndicesMemoryIndexBufferSizePercentage != nil { + o = append(o, map[string]any{ + "key": "IndicesMemoryIndexBufferSizePercentage", + "value": *c.IndicesMemoryIndexBufferSizePercentage, + }) + } + if c.IndicesMemoryMinIndexBufferSizeMb != nil { + o = append(o, 
map[string]any{ + "key": "IndicesMemoryMinIndexBufferSizeMb", + "value": *c.IndicesMemoryMinIndexBufferSizeMb, + }) + } + if c.IndicesMemoryMaxIndexBufferSizeMb != nil { + o = append(o, map[string]any{ + "key": "IndicesMemoryMaxIndexBufferSizeMb", + "value": *c.IndicesMemoryMaxIndexBufferSizeMb, + }) + } + if c.IndicesQueriesCacheSizePercentage != nil { + o = append(o, map[string]any{ + "key": "IndicesQueriesCacheSizePercentage", + "value": *c.IndicesQueriesCacheSizePercentage, + }) + } + if c.IndicesRecoveryMaxMbPerSec != nil { + o = append(o, map[string]any{ + "key": "IndicesRecoveryMaxMbPerSec", + "value": *c.IndicesRecoveryMaxMbPerSec, + }) + } + if c.IndicesRecoveryMaxConcurrentFileChunks != nil { + o = append(o, map[string]any{ + "key": "IndicesRecoveryMaxConcurrentFileChunks", + "value": *c.IndicesRecoveryMaxConcurrentFileChunks, + }) + } + if c.ThreadPoolSearchSize != nil { + o = append(o, map[string]any{ + "key": "ThreadPoolSearchSize", + "value": *c.ThreadPoolSearchSize, + }) + } + if c.ThreadPoolSearchThrottledSize != nil { + o = append(o, map[string]any{ + "key": "ThreadPoolSearchThrottledSize", + "value": *c.ThreadPoolSearchThrottledSize, + }) + } + if c.ThreadPoolGetSize != nil { + o = append(o, map[string]any{ + "key": "ThreadPoolGetSize", + "value": *c.ThreadPoolGetSize, + }) + } + if c.ThreadPoolAnalyzeSize != nil { + o = append(o, map[string]any{ + "key": "ThreadPoolAnalyzeSize", + "value": *c.ThreadPoolAnalyzeSize, + }) + } + if c.ThreadPoolWriteSize != nil { + o = append(o, map[string]any{ + "key": "ThreadPoolWriteSize", + "value": *c.ThreadPoolWriteSize, + }) + } + if c.ThreadPoolForceMergeSize != nil { + o = append(o, map[string]any{ + "key": "ThreadPoolForceMergeSize", + "value": *c.ThreadPoolForceMergeSize, + }) + } + if c.ThreadPoolSearchQueueSize != nil { + o = append(o, map[string]any{ + "key": "ThreadPoolSearchQueueSize", + "value": *c.ThreadPoolSearchQueueSize, + }) + } + if c.ThreadPoolSearchThrottledQueueSize != nil { + o = append(o, 
map[string]any{ + "key": "ThreadPoolSearchThrottledQueueSize", + "value": *c.ThreadPoolSearchThrottledQueueSize, + }) + } + if c.ThreadPoolGetQueueSize != nil { + o = append(o, map[string]any{ + "key": "ThreadPoolGetQueueSize", + "value": *c.ThreadPoolGetQueueSize, + }) + } + if c.ThreadPoolAnalyzeQueueSize != nil { + o = append(o, map[string]any{ + "key": "ThreadPoolAnalyzeQueueSize", + "value": *c.ThreadPoolAnalyzeQueueSize, + }) + } + if c.ThreadPoolWriteQueueSize != nil { + o = append(o, map[string]any{ + "key": "ThreadPoolWriteQueueSize", + "value": *c.ThreadPoolWriteQueueSize, + }) + } + if c.IsmEnabled != nil { + o = append(o, map[string]any{ + "key": "IsmEnabled", + "value": *c.IsmEnabled, + }) + } + if c.IsmHistoryEnabled != nil { + o = append(o, map[string]any{ + "key": "IsmHistoryEnabled", + "value": *c.IsmHistoryEnabled, + }) + } + if c.IsmHistoryMaxAgeHours != nil { + o = append(o, map[string]any{ + "key": "IsmHistoryMaxAgeHours", + "value": *c.IsmHistoryMaxAgeHours, + }) + } + if c.IsmHistoryMaxDocs != nil { + o = append(o, map[string]any{ + "key": "IsmHistoryMaxDocs", + "value": *c.IsmHistoryMaxDocs, + }) + } + if c.IsmHistoryRolloverCheckPeriodHours != nil { + o = append(o, map[string]any{ + "key": "IsmHistoryRolloverCheckPeriodHours", + "value": *c.IsmHistoryRolloverCheckPeriodHours, + }) + } + if c.IsmHistoryRolloverRetentionPeriodDays != nil { + o = append(o, map[string]any{ + "key": "IsmHistoryRolloverRetentionPeriodDays", + "value": *c.IsmHistoryRolloverRetentionPeriodDays, + }) + } + if c.SearchMaxBuckets != nil { + o = append(o, map[string]any{ + "key": "SearchMaxBuckets", + "value": *c.SearchMaxBuckets, + }) + } + if c.ActionAutoCreateIndexEnabled != nil { + o = append(o, map[string]any{ + "key": "ActionAutoCreateIndexEnabled", + "value": *c.ActionAutoCreateIndexEnabled, + }) + } + if c.EnableSecurityAudit != nil { + o = append(o, map[string]any{ + "key": "EnableSecurityAudit", + "value": *c.EnableSecurityAudit, + }) + } + if 
c.ActionDestructiveRequiresName != nil { + o = append(o, map[string]any{ + "key": "ActionDestructiveRequiresName", + "value": *c.ActionDestructiveRequiresName, + }) + } + if c.ClusterMaxShardsPerNode != nil { + o = append(o, map[string]any{ + "key": "ClusterMaxShardsPerNode", + "value": *c.ClusterMaxShardsPerNode, + }) + } + if c.OverrideMainResponseVersion != nil { + o = append(o, map[string]any{ + "key": "OverrideMainResponseVersion", + "value": *c.OverrideMainResponseVersion, + }) + } + if c.ScriptMaxCompilationsRate != nil { + o = append(o, map[string]any{ + "key": "ScriptMaxCompilationsRate", + "value": *c.ScriptMaxCompilationsRate, + }) + } + if c.ClusterRoutingAllocationNodeConcurrentRecoveries != nil { + o = append(o, map[string]any{ + "key": "ClusterRoutingAllocationNodeConcurrentRecoveries", + "value": *c.ClusterRoutingAllocationNodeConcurrentRecoveries, + }) + } + if c.ReindexRemoteWhitelist != nil { + o = append(o, map[string]any{ + "key": "ReindexRemoteWhitelist", + "value": c.ReindexRemoteWhitelist, + }) + } + if c.PluginsAlertingFilterByBackendRolesEnabled != nil { + o = append(o, map[string]any{ + "key": "PluginsAlertingFilterByBackendRolesEnabled", + "value": *c.PluginsAlertingFilterByBackendRolesEnabled, + }) + } + + return o +} + type DatabaseEvents struct { DatabaseEvents do.DatabaseEvents } diff --git a/do/databases.go b/do/databases.go index 8dc385031..8865866a1 100644 --- a/do/databases.go +++ b/do/databases.go @@ -130,6 +130,11 @@ type KafkaConfig struct { *godo.KafkaConfig } +// OpensearchConfig is a wrapper for godo.OpensearchConfig +type OpensearchConfig struct { + *godo.OpensearchConfig +} + // DatabaseTopics is a slice of DatabaseTopic type DatabaseTopics []DatabaseTopic @@ -212,12 +217,14 @@ type DatabasesService interface { GetRedisConfiguration(databaseID string) (*RedisConfig, error) GetMongoDBConfiguration(databaseID string) (*MongoDBConfig, error) GetKafkaConfiguration(databaseID string) (*KafkaConfig, error) + 
GetOpensearchConfiguration(databaseID string) (*OpensearchConfig, error) UpdateMySQLConfiguration(databaseID string, confString string) error UpdatePostgreSQLConfiguration(databaseID string, confString string) error UpdateRedisConfiguration(databaseID string, confString string) error UpdateMongoDBConfiguration(databaseID string, confString string) error UpdateKafkaConfiguration(databaseID string, confString string) error + UpdateOpensearchConfiguration(databaseID string, confString string) error ListTopics(string) (DatabaseTopics, error) GetTopic(string, string) (*DatabaseTopic, error) @@ -731,6 +738,17 @@ func (ds *databasesService) GetKafkaConfiguration(databaseID string) (*KafkaConf }, nil } +func (ds *databasesService) GetOpensearchConfiguration(databaseID string) (*OpensearchConfig, error) { + cfg, _, err := ds.client.Databases.GetOpensearchConfig(context.TODO(), databaseID) + if err != nil { + return nil, err + } + + return &OpensearchConfig{ + OpensearchConfig: cfg, + }, nil +} + func (ds *databasesService) UpdateMySQLConfiguration(databaseID string, confString string) error { var conf godo.MySQLConfig err := json.Unmarshal([]byte(confString), &conf) @@ -806,6 +824,21 @@ func (ds *databasesService) UpdateKafkaConfiguration(databaseID string, confStri return nil } +func (ds *databasesService) UpdateOpensearchConfiguration(databaseID string, confString string) error { + var conf godo.OpensearchConfig + err := json.Unmarshal([]byte(confString), &conf) + if err != nil { + return err + } + + _, err = ds.client.Databases.UpdateOpensearchConfig(context.TODO(), databaseID, &conf) + if err != nil { + return err + } + + return nil +} + func (ds *databasesService) ListTopics(databaseID string) (DatabaseTopics, error) { f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Databases.ListTopics(context.TODO(), databaseID, opt) diff --git a/do/mocks/DatabasesService.go b/do/mocks/DatabasesService.go index 3b6bc1d11..b9f2a99c0 
100644 --- a/do/mocks/DatabasesService.go +++ b/do/mocks/DatabasesService.go @@ -363,6 +363,21 @@ func (mr *MockDatabasesServiceMockRecorder) GetMySQLConfiguration(databaseID any return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMySQLConfiguration", reflect.TypeOf((*MockDatabasesService)(nil).GetMySQLConfiguration), databaseID) } +// GetOpensearchConfiguration mocks base method. +func (m *MockDatabasesService) GetOpensearchConfiguration(databaseID string) (*do.OpensearchConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetOpensearchConfiguration", databaseID) + ret0, _ := ret[0].(*do.OpensearchConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetOpensearchConfiguration indicates an expected call of GetOpensearchConfiguration. +func (mr *MockDatabasesServiceMockRecorder) GetOpensearchConfiguration(databaseID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOpensearchConfiguration", reflect.TypeOf((*MockDatabasesService)(nil).GetOpensearchConfiguration), databaseID) +} + // GetPool mocks base method. func (m *MockDatabasesService) GetPool(arg0, arg1 string) (*do.DatabasePool, error) { m.ctrl.T.Helper() @@ -793,6 +808,20 @@ func (mr *MockDatabasesServiceMockRecorder) UpdateMySQLConfiguration(databaseID, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateMySQLConfiguration", reflect.TypeOf((*MockDatabasesService)(nil).UpdateMySQLConfiguration), databaseID, confString) } +// UpdateOpensearchConfiguration mocks base method. +func (m *MockDatabasesService) UpdateOpensearchConfiguration(databaseID, confString string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateOpensearchConfiguration", databaseID, confString) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateOpensearchConfiguration indicates an expected call of UpdateOpensearchConfiguration. 
+func (mr *MockDatabasesServiceMockRecorder) UpdateOpensearchConfiguration(databaseID, confString any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateOpensearchConfiguration", reflect.TypeOf((*MockDatabasesService)(nil).UpdateOpensearchConfiguration), databaseID, confString) +} + // UpdatePool mocks base method. func (m *MockDatabasesService) UpdatePool(arg0, arg1 string, arg2 *godo.DatabaseUpdatePoolRequest) error { m.ctrl.T.Helper() diff --git a/go.mod b/go.mod index 59323d6ac..e21ee632d 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22 require ( github.com/blang/semver v3.5.1+incompatible github.com/creack/pty v1.1.21 - github.com/digitalocean/godo v1.126.0 + github.com/digitalocean/godo v1.126.1-0.20241002131132-fb61c333ae26 github.com/docker/cli v24.0.5+incompatible github.com/docker/docker v25.0.6+incompatible github.com/docker/docker-credential-helpers v0.7.0 // indirect diff --git a/go.sum b/go.sum index c33695285..a79d790da 100644 --- a/go.sum +++ b/go.sum @@ -91,8 +91,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8= -github.com/digitalocean/godo v1.126.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.126.1-0.20241002131132-fb61c333ae26 h1:Bqg9D5DoRi1UzBL9wdQmcqsPDzEDupY6eLE3gBpDy4Q= +github.com/digitalocean/godo v1.126.1-0.20241002131132-fb61c333ae26/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= 
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/cli v24.0.5+incompatible h1:WeBimjvS0eKdH4Ygx+ihVq1Q++xg36M/rMi4aXAvodc= diff --git a/integration/database_config_get_test.go b/integration/database_config_get_test.go index 033e12fd2..75e2c42a2 100644 --- a/integration/database_config_get_test.go +++ b/integration/database_config_get_test.go @@ -84,6 +84,18 @@ var _ = suite("database/config/get", func(t *testing.T, when spec.G, it spec.S) } w.Write([]byte(databaseConfigKafkaGetResponse)) + case "/v2/databases/opensearch-database-id/config": + auth := req.Header.Get("Authorization") + if auth != "Bearer some-magic-token" { + w.WriteHeader(http.StatusTeapot) + } + + if req.Method != http.MethodGet { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + w.Write([]byte(databaseConfigOpensearchGetResponse)) default: dump, err := httputil.DumpRequest(req, true) if err != nil { @@ -312,5 +324,47 @@ RedisACLChannelsDefault allchannels "log_segment_delete_delay_ms": 60000, "auto_create_topics_enable": true } + }` + + databaseConfigOpensearchGetResponse = `{ + "config": { + "ism_enabled": true, + "ism_history_enabled": true, + "ism_history_max_age_hours": 24, + "ism_history_max_docs": 2500000, + "ism_history_rollover_check_period_hours": 8, + "ism_history_rollover_retention_period_days": 30, + "http_max_content_length_bytes": 100000000, + "http_max_header_size_bytes": 8192, + "http_max_initial_line_length_bytes": 4096, + "indices_query_bool_max_clause_count": 1024, + "search_max_buckets": 10000, + "indices_fielddata_cache_size_percentage": 0, + "indices_memory_index_buffer_size_percentage": 10, + "indices_memory_min_index_buffer_size_mb": 48, + "indices_memory_max_index_buffer_size_mb": 0, + "indices_queries_cache_size_percentage": 10, + "indices_recovery_max_mb_per_sec": 40, + "indices_recovery_max_concurrent_file_chunks": 2, + "action_auto_create_index_enabled": true, + 
"action_destructive_requires_name": false, + "plugins_alerting_filter_by_backend_roles_enabled": false, + "enable_security_audit": false, + "thread_pool_search_size": 0, + "thread_pool_search_throttled_size": 0, + "thread_pool_search_throttled_queue_size": 0, + "thread_pool_search_queue_size": 0, + "thread_pool_get_size": 0, + "thread_pool_get_queue_size": 0, + "thread_pool_analyze_size": 0, + "thread_pool_analyze_queue_size": 0, + "thread_pool_write_size": 0, + "thread_pool_write_queue_size": 0, + "thread_pool_force_merge_size": 0, + "override_main_response_version": false, + "script_max_compilations_rate": "use-context", + "cluster_max_shards_per_node": 0, + "cluster_routing_allocation_node_concurrent_recoveries": 2 + } }` ) diff --git a/integration/database_config_update_test.go b/integration/database_config_update_test.go index 1ffd608ea..269d05126 100644 --- a/integration/database_config_update_test.go +++ b/integration/database_config_update_test.go @@ -124,6 +124,26 @@ var _ = suite("database/config/get", func(t *testing.T, when spec.G, it spec.S) } expect.Equal(expected, strings.TrimSpace(string(b))) + w.WriteHeader(http.StatusOK) + case "/v2/databases/opensearch-database-id/config": + auth := req.Header.Get("Authorization") + if auth != "Bearer some-magic-token" { + w.WriteHeader(http.StatusTeapot) + } + + if req.Method != http.MethodPatch { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + expected := `{"config":{"ism_history_max_age_hours":12}}` + b, err := io.ReadAll(req.Body) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + expect.Equal(expected, strings.TrimSpace(string(b))) + w.WriteHeader(http.StatusOK) default: dump, err := httputil.DumpRequest(req, true) @@ -230,4 +250,23 @@ var _ = suite("database/config/get", func(t *testing.T, when spec.G, it spec.S) expect.Empty(strings.TrimSpace(string(output))) }) }) + + when("all required flags are passed", func() { + it("updates the opensearch database config", 
func() { + cmd := exec.Command(builtBinaryPath, + "-t", "some-magic-token", + "-u", server.URL, + "database", + "configuration", + "update", + "--engine", "opensearch", + "opensearch-database-id", + "--config-json", `{"ism_history_max_age_hours":12}`, + ) + + output, err := cmd.CombinedOutput() + expect.NoError(err, fmt.Sprintf("received error output: %s", output)) + expect.Empty(strings.TrimSpace(string(output))) + }) + }) }) diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index e168186ff..42954928b 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -154,11 +154,13 @@ type DatabasesService interface { GetRedisConfig(context.Context, string) (*RedisConfig, *Response, error) GetMySQLConfig(context.Context, string) (*MySQLConfig, *Response, error) GetMongoDBConfig(context.Context, string) (*MongoDBConfig, *Response, error) + GetOpensearchConfig(context.Context, string) (*OpensearchConfig, *Response, error) GetKafkaConfig(context.Context, string) (*KafkaConfig, *Response, error) UpdatePostgreSQLConfig(context.Context, string, *PostgreSQLConfig) (*Response, error) UpdateRedisConfig(context.Context, string, *RedisConfig) (*Response, error) UpdateMySQLConfig(context.Context, string, *MySQLConfig) (*Response, error) UpdateMongoDBConfig(context.Context, string, *MongoDBConfig) (*Response, error) + UpdateOpensearchConfig(context.Context, string, *OpensearchConfig) (*Response, error) UpdateKafkaConfig(context.Context, string, *KafkaConfig) (*Response, error) ListOptions(todo context.Context) (*DatabaseOptions, *Response, error) UpgradeMajorVersion(context.Context, string, *UpgradeVersionRequest) (*Response, error) @@ -683,6 +685,48 @@ type KafkaConfig struct { AutoCreateTopicsEnable *bool `json:"auto_create_topics_enable,omitempty"` } +// OpensearchConfig holds advanced configurations for Opensearch database clusters. 
+type OpensearchConfig struct { + HttpMaxContentLengthBytes *int `json:"http_max_content_length_bytes,omitempty"` + HttpMaxHeaderSizeBytes *int `json:"http_max_header_size_bytes,omitempty"` + HttpMaxInitialLineLengthBytes *int `json:"http_max_initial_line_length_bytes,omitempty"` + IndicesQueryBoolMaxClauseCount *int `json:"indices_query_bool_max_clause_count,omitempty"` + IndicesFielddataCacheSizePercentage *int `json:"indices_fielddata_cache_size_percentage,omitempty"` + IndicesMemoryIndexBufferSizePercentage *int `json:"indices_memory_index_buffer_size_percentage,omitempty"` + IndicesMemoryMinIndexBufferSizeMb *int `json:"indices_memory_min_index_buffer_size_mb,omitempty"` + IndicesMemoryMaxIndexBufferSizeMb *int `json:"indices_memory_max_index_buffer_size_mb,omitempty"` + IndicesQueriesCacheSizePercentage *int `json:"indices_queries_cache_size_percentage,omitempty"` + IndicesRecoveryMaxMbPerSec *int `json:"indices_recovery_max_mb_per_sec,omitempty"` + IndicesRecoveryMaxConcurrentFileChunks *int `json:"indices_recovery_max_concurrent_file_chunks,omitempty"` + ThreadPoolSearchSize *int `json:"thread_pool_search_size,omitempty"` + ThreadPoolSearchThrottledSize *int `json:"thread_pool_search_throttled_size,omitempty"` + ThreadPoolGetSize *int `json:"thread_pool_get_size,omitempty"` + ThreadPoolAnalyzeSize *int `json:"thread_pool_analyze_size,omitempty"` + ThreadPoolWriteSize *int `json:"thread_pool_write_size,omitempty"` + ThreadPoolForceMergeSize *int `json:"thread_pool_force_merge_size,omitempty"` + ThreadPoolSearchQueueSize *int `json:"thread_pool_search_queue_size,omitempty"` + ThreadPoolSearchThrottledQueueSize *int `json:"thread_pool_search_throttled_queue_size,omitempty"` + ThreadPoolGetQueueSize *int `json:"thread_pool_get_queue_size,omitempty"` + ThreadPoolAnalyzeQueueSize *int `json:"thread_pool_analyze_queue_size,omitempty"` + ThreadPoolWriteQueueSize *int `json:"thread_pool_write_queue_size,omitempty"` + IsmEnabled *bool `json:"ism_enabled,omitempty"` + 
IsmHistoryEnabled *bool `json:"ism_history_enabled,omitempty"` + IsmHistoryMaxAgeHours *int `json:"ism_history_max_age_hours,omitempty"` + IsmHistoryMaxDocs *uint64 `json:"ism_history_max_docs,omitempty"` + IsmHistoryRolloverCheckPeriodHours *int `json:"ism_history_rollover_check_period_hours,omitempty"` + IsmHistoryRolloverRetentionPeriodDays *int `json:"ism_history_rollover_retention_period_days,omitempty"` + SearchMaxBuckets *int `json:"search_max_buckets,omitempty"` + ActionAutoCreateIndexEnabled *bool `json:"action_auto_create_index_enabled,omitempty"` + EnableSecurityAudit *bool `json:"enable_security_audit,omitempty"` + ActionDestructiveRequiresName *bool `json:"action_destructive_requires_name,omitempty"` + ClusterMaxShardsPerNode *int `json:"cluster_max_shards_per_node,omitempty"` + OverrideMainResponseVersion *bool `json:"override_main_response_version,omitempty"` + ScriptMaxCompilationsRate *string `json:"script_max_compilations_rate,omitempty"` + ClusterRoutingAllocationNodeConcurrentRecoveries *int `json:"cluster_routing_allocation_node_concurrent_recoveries,omitempty"` + ReindexRemoteWhitelist []string `json:"reindex_remote_whitelist,omitempty"` + PluginsAlertingFilterByBackendRolesEnabled *bool `json:"plugins_alerting_filter_by_backend_roles_enabled,omitempty"` +} + type databaseUserRoot struct { User *DatabaseUser `json:"user"` } @@ -727,6 +771,10 @@ type databaseMongoDBConfigRoot struct { Config *MongoDBConfig `json:"config"` } +type databaseOpensearchConfigRoot struct { + Config *OpensearchConfig `json:"config"` +} + type databaseKafkaConfigRoot struct { Config *KafkaConfig `json:"config"` } @@ -1606,6 +1654,38 @@ func (svc *DatabasesServiceOp) UpdateKafkaConfig(ctx context.Context, databaseID return resp, nil } +// GetOpensearchConfig retrieves the config for a Opensearch database cluster. 
+func (svc *DatabasesServiceOp) GetOpensearchConfig(ctx context.Context, databaseID string) (*OpensearchConfig, *Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(databaseOpensearchConfigRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Config, resp, nil +} + +// UpdateOpensearchConfig updates the config for a Opensearch database cluster. +func (svc *DatabasesServiceOp) UpdateOpensearchConfig(ctx context.Context, databaseID string, config *OpensearchConfig) (*Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + root := &databaseOpensearchConfigRoot{ + Config: config, + } + req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + // ListOptions gets the database options available. func (svc *DatabasesServiceOp) ListOptions(ctx context.Context) (*DatabaseOptions, *Response, error) { root := new(databaseOptionsRoot) diff --git a/vendor/modules.txt b/vendor/modules.txt index 051318834..692629b60 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -61,7 +61,7 @@ github.com/creack/pty # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew -# github.com/digitalocean/godo v1.126.0 +# github.com/digitalocean/godo v1.126.1-0.20241002131132-fb61c333ae26 ## explicit; go 1.22 github.com/digitalocean/godo github.com/digitalocean/godo/metrics