diff --git a/digitalocean/database/resource_database_cluster_test.go b/digitalocean/database/resource_database_cluster_test.go index 2a0fb5f6c..685713ad9 100644 --- a/digitalocean/database/resource_database_cluster_test.go +++ b/digitalocean/database/resource_database_cluster_test.go @@ -829,6 +829,17 @@ resource "digitalocean_database_cluster" "foobar" { tags = ["production"] }` +const testAccCheckDigitalOceanDatabaseClusterKafka = ` +resource "digitalocean_database_cluster" "foobar" { + name = "%s" + engine = "kafka" + version = "%s" + size = "db-s-1vcpu-2gb" + region = "nyc1" + node_count = 3 + tags = ["production"] +}` + const testAccCheckDigitalOceanDatabaseClusterMySQL = ` resource "digitalocean_database_cluster" "foobar" { name = "%s" diff --git a/digitalocean/database/resource_database_kafka_topic.go b/digitalocean/database/resource_database_kafka_topic.go new file mode 100644 index 000000000..aae52b821 --- /dev/null +++ b/digitalocean/database/resource_database_kafka_topic.go @@ -0,0 +1,593 @@ +package database + +import ( + "context" + "errors" + "fmt" + "log" + "strconv" + "strings" + + "github.com/digitalocean/godo" + "github.com/digitalocean/terraform-provider-digitalocean/digitalocean/config" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func ResourceDigitalOceanDatabaseKafkaTopic() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceDigitalOceanDatabaseKafkaTopicCreate, + ReadContext: resourceDigitalOceanDatabaseKafkaTopicRead, + UpdateContext: resourceDigitalOceanDatabaseKafkaTopicUpdate, + DeleteContext: resourceDigitalOceanDatabaseKafkaTopicDelete, + Importer: &schema.ResourceImporter{ + State: resourceDigitalOceanDatabaseKafkaTopicImport, + }, + + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: 
validation.NoZeroValues, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.NoZeroValues, + }, + "partition_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + ValidateFunc: validation.IntBetween(3, 2048), + Default: 3, + }, + "replication_factor": { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + ValidateFunc: validation.IntAtLeast(2), + Default: 2, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: false, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cleanup_policy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ + "delete", + "compact", + "compact_delete", + }, false), + }, + "compression_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ + "snappy", + "gzip", + "lz4", + "producer", + "uncompressed", + "zstd", + }, false), + }, + "delete_retention_ms": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateUint64(), + }, + "file_delete_delay_ms": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateUint64(), + }, + "flush_messages": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateUint64(), + }, + "flush_ms": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateUint64(), + }, + "index_interval_bytes": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateUint64(), + }, + "max_compaction_lag_ms": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateUint64(), + }, + "max_message_bytes": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateUint64(), + }, + 
"message_down_conversion_enable": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "message_format_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ + "0.8.0", + "0.8.1", + "0.8.2", + "0.9.0", + "0.10.0", + "0.10.0-IV0", + "0.10.0-IV1", + "0.10.1", + "0.10.1-IV0", + "0.10.1-IV1", + "0.10.1-IV2", + "0.10.2", + "0.10.2-IV0", + "0.11.0", + "0.11.0-IV0", + "0.11.0-IV1", + "0.11.0-IV2", + "1.0", + "1.0-IV0", + "1.1", + "1.1-IV0", + "2.0", + "2.0-IV0", + "2.0-IV1", + "2.1", + "2.1-IV0", + "2.1-IV1", + "2.1-IV2", + "2.2", + "2.2-IV0", + "2.2-IV1", + "2.3", + "2.3-IV0", + "2.3-IV1", + "2.4", + "2.4-IV0", + "2.4-IV1", + "2.5", + "2.5-IV0", + "2.6", + "2.6-IV0", + "2.7", + "2.7-IV0", + "2.7-IV1", + "2.7-IV2", + "2.8", + "2.8-IV0", + "2.8-IV1", + "3.0", + "3.0-IV0", + "3.0-IV1", + "3.1", + "3.1-IV0", + "3.2", + "3.2-IV0", + "3.3", + "3.3-IV0", + "3.3-IV1", + "3.3-IV2", + "3.3-IV3", + "3.4", + "3.4-IV0", + "3.5", + "3.5-IV0", + "3.5-IV1", + "3.5-IV2", + "3.6", + "3.6-IV0", + "3.6-IV1", + "3.6-IV2", + }, false), + }, + "message_timestamp_difference_max_ms": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateInt64(), + }, + "message_timestamp_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ + "create_time", + "log_append_time", + }, false), + }, + "min_cleanable_dirty_ratio": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + ValidateFunc: validation.FloatBetween(0.0, 1.0), + }, + "min_compaction_lag_ms": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateUint64(), + }, + "min_insync_replicas": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "preallocate": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "retention_bytes": { + Type: 
schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateInt64(), + }, + "retention_ms": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateInt64(), + }, + "segment_bytes": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateUint64(), + }, + "segment_index_bytes": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateUint64(), + }, + "segment_jitter_ms": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateUint64(), + }, + "segment_ms": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateUint64(), + }, + "unclean_leader_election_enable": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func resourceDigitalOceanDatabaseKafkaTopicCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*config.CombinedConfig).GodoClient() + clusterID := d.Get("cluster_id").(string) + + partition_count := uint32(d.Get("partition_count").(int)) + replication_factor := uint32(d.Get("replication_factor").(int)) + + opts := &godo.DatabaseCreateTopicRequest{ + Name: d.Get("name").(string), + PartitionCount: &partition_count, + ReplicationFactor: &replication_factor, + } + + if v, ok := d.GetOk("config"); ok { + opts.Config = getTopicConfig(v.([]interface{})) + } + + log.Printf("[DEBUG] Database kafka topic create configuration: %#v", opts) + topic, _, err := client.Databases.CreateTopic(context.Background(), clusterID, opts) + if err != nil { + return diag.Errorf("Error creating database kafka topic: %s", err) + } + + d.SetId(makeKafkaTopicID(clusterID, topic.Name)) + log.Printf("[INFO] Database kafka topic name: %s", topic.Name) + + return resourceDigitalOceanDatabaseKafkaTopicRead(ctx, d, meta) +} + +func resourceDigitalOceanDatabaseKafkaTopicUpdate(ctx context.Context, d 
*schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*config.CombinedConfig).GodoClient() + clusterID := d.Get("cluster_id").(string) + + topicName := d.Get("name").(string) + partition_count := uint32(d.Get("partition_count").(int)) + replication_factor := uint32(d.Get("replication_factor").(int)) + + opts := &godo.DatabaseUpdateTopicRequest{ + PartitionCount: &partition_count, + ReplicationFactor: &replication_factor, + } + + if v, ok := d.GetOk("config"); ok { + opts.Config = getTopicConfig(v.([]interface{})) + } + + log.Printf("[DEBUG] Database kafka topic update configuration: %#v", opts) + _, err := client.Databases.UpdateTopic(context.Background(), clusterID, topicName, opts) + if err != nil { + return diag.Errorf("Error updating database kafka topic: %s", err) + } + + return resourceDigitalOceanDatabaseKafkaTopicRead(ctx, d, meta) +} + +func resourceDigitalOceanDatabaseKafkaTopicRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*config.CombinedConfig).GodoClient() + clusterID := d.Get("cluster_id").(string) + topicName := d.Get("name").(string) + + topic, resp, err := client.Databases.GetTopic(ctx, clusterID, topicName) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + d.SetId("") + return nil + } + + return diag.Errorf("Error retrieving kafka topic: %s", err) + } + + d.Set("state", topic.State) + d.Set("replication_factor", topic.ReplicationFactor) + // updating 'partition_count' is async, the number of partitions returned in the API will not be updated immeadiately in the response + // setting this property to the current state rather than the number of `partitions` returned in the GetTopic response + d.Set("partition_count", d.Get("partition_count").(int)) + + if err := d.Set("config", flattenTopicConfig(topic.Config)); err != nil { + return diag.Errorf("Error setting topic config: %#v", err) + } + + return nil +} + +func 
resourceDigitalOceanDatabaseKafkaTopicDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*config.CombinedConfig).GodoClient() + clusterID := d.Get("cluster_id").(string) + topicName := d.Get("name").(string) + + log.Printf("[INFO] Deleting kafka topic: %s", d.Id()) + _, err := client.Databases.DeleteTopic(ctx, clusterID, topicName) + if err != nil { + return diag.Errorf("Error deleting kafka topic: %s", err) + } + + d.SetId("") + return nil +} +func flattenTopicConfig(config *godo.TopicConfig) []map[string]interface{} { + result := make([]map[string]interface{}, 0) + item := make(map[string]interface{}) + + item["cleanup_policy"] = config.CleanupPolicy + item["compression_type"] = config.CompressionType + item["delete_retention_ms"] = strconv.FormatUint(*config.DeleteRetentionMS, 10) + item["file_delete_delay_ms"] = strconv.FormatUint(*config.FileDeleteDelayMS, 10) + item["flush_messages"] = strconv.FormatUint(*config.FlushMessages, 10) + item["flush_ms"] = strconv.FormatUint(*config.FlushMS, 10) + item["index_interval_bytes"] = strconv.FormatUint(*config.IndexIntervalBytes, 10) + item["max_compaction_lag_ms"] = strconv.FormatUint(*config.MaxCompactionLagMS, 10) + item["max_message_bytes"] = strconv.FormatUint(*config.MaxMessageBytes, 10) + item["message_down_conversion_enable"] = *config.MessageDownConversionEnable + item["message_format_version"] = config.MessageFormatVersion + item["message_timestamp_difference_max_ms"] = strconv.FormatUint(*config.MessageTimestampDifferenceMaxMS, 10) + item["message_timestamp_type"] = config.MessageTimestampType + item["min_cleanable_dirty_ratio"] = *config.MinCleanableDirtyRatio + item["min_compaction_lag_ms"] = strconv.FormatUint(*config.MinCompactionLagMS, 10) + item["min_insync_replicas"] = int(*config.MinInsyncReplicas) + item["retention_bytes"] = strconv.FormatInt(*config.RetentionBytes, 10) + item["retention_ms"] = strconv.FormatInt(*config.RetentionMS, 10) + 
item["segment_bytes"] = strconv.FormatUint(*config.SegmentBytes, 10) + item["segment_index_bytes"] = strconv.FormatUint(*config.SegmentIndexBytes, 10) + item["segment_jitter_ms"] = strconv.FormatUint(*config.SegmentJitterMS, 10) + item["segment_ms"] = strconv.FormatUint(*config.SegmentMS, 10) + item["unclean_leader_election_enable"] = *config.UncleanLeaderElectionEnable + result = append(result, item) + + return result +} + +func makeKafkaTopicID(clusterID string, name string) string { + return fmt.Sprintf("%s/topic/%s", clusterID, name) +} + +func validateInt64() schema.SchemaValidateFunc { + return func(i interface{}, k string) (warnings []string, errors []error) { + _, err := strconv.ParseInt(i.(string), 10, 64) + if err != nil { + errors = append(errors, fmt.Errorf("expected type of %s to be int64", k)) + return warnings, errors + } + return warnings, errors + } +} + +func validateUint64() schema.SchemaValidateFunc { + return func(i interface{}, k string) (warnings []string, errors []error) { + _, err := strconv.ParseUint(i.(string), 10, 64) + if err != nil { + errors = append(errors, fmt.Errorf("expected type of %s to be uint64", k)) + return warnings, errors + } + return warnings, errors + } +} + +func getTopicConfig(raw []interface{}) *godo.TopicConfig { + res := &godo.TopicConfig{} + res.CleanupPolicy = "compact_delete" + for _, kv := range raw { + cfg := kv.(map[string]interface{}) + + if v, ok := cfg["cleanup_policy"]; ok { + res.CleanupPolicy = v.(string) + } + if v, ok := cfg["compression_type"]; ok { + res.CompressionType = v.(string) + } + if v, ok := cfg["delete_retention_ms"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + if err == nil { + res.DeleteRetentionMS = godo.PtrTo(v) + } + } + if v, ok := cfg["file_delete_delay_ms"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + if err == nil { + res.FileDeleteDelayMS = godo.PtrTo(v) + } + } + if v, ok := cfg["flush_messages"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + 
if err == nil { + res.FlushMessages = godo.PtrTo(v) + } + } + if v, ok := cfg["flush_ms"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + if err == nil { + res.FlushMS = godo.PtrTo(v) + } + } + if v, ok := cfg["index_interval_bytes"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + if err == nil { + res.IndexIntervalBytes = godo.PtrTo(v) + } + } + if v, ok := cfg["max_compaction_lag_ms"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + if err == nil { + res.MaxCompactionLagMS = godo.PtrTo(v) + } + } + if v, ok := cfg["max_message_bytes"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + if err == nil { + res.MaxMessageBytes = godo.PtrTo(v) + } + } + if v, ok := cfg["message_down_conversion_enable"]; ok { + res.MessageDownConversionEnable = godo.PtrTo(v.(bool)) + } + if v, ok := cfg["message_format_version"]; ok { + res.MessageFormatVersion = v.(string) + } + if v, ok := cfg["message_timestamp_difference_max_ms"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + if err == nil { + res.MessageTimestampDifferenceMaxMS = godo.PtrTo(v) + } + } + if v, ok := cfg["message_timestamp_type"]; ok { + res.MessageTimestampType = v.(string) + } + if v, ok := cfg["min_cleanable_dirty_ratio"]; ok { + res.MinCleanableDirtyRatio = godo.PtrTo(float32(v.(float64))) + } + if v, ok := cfg["min_compaction_lag_ms"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + if err == nil { + res.MinCompactionLagMS = godo.PtrTo(v) + } + } + if v, ok := cfg["min_insync_replicas"]; ok { + res.MinInsyncReplicas = godo.PtrTo(uint32(v.(int))) + } + if v, ok := cfg["preallocate"]; ok { + res.Preallocate = godo.PtrTo(v.(bool)) + } + if v, ok := cfg["retention_bytes"]; ok { + v, err := strconv.ParseInt(v.(string), 10, 64) + if err == nil { + res.RetentionBytes = godo.PtrTo(v) + } + } + if v, ok := cfg["retention_ms"]; ok { + v, err := strconv.ParseInt(v.(string), 10, 64) + if err == nil { + res.RetentionMS = godo.PtrTo(v) + } + } + if v, ok := 
cfg["segment_bytes"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + if err == nil { + res.SegmentBytes = godo.PtrTo(v) + } + } + if v, ok := cfg["segment_index_bytes"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + if err == nil { + res.SegmentIndexBytes = godo.PtrTo(v) + } + } + if v, ok := cfg["segment_jitter_ms"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + if err == nil { + res.SegmentJitterMS = godo.PtrTo(v) + } + } + if v, ok := cfg["segment_ms"]; ok { + v, err := strconv.ParseUint(v.(string), 10, 64) + if err == nil { + res.SegmentMS = godo.PtrTo(v) + } + } + if v, ok := cfg["unclean_leader_election_enable"]; ok { + res.UncleanLeaderElectionEnable = godo.PtrTo(v.(bool)) + } + } + + return res +} + +func resourceDigitalOceanDatabaseKafkaTopicImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + if strings.Contains(d.Id(), ",") { + s := strings.Split(d.Id(), ",") + d.SetId(makeKafkaTopicID(s[0], s[1])) + d.Set("cluster_id", s[0]) + d.Set("name", s[1]) + } else { + return nil, errors.New("must use the ID of the source kafka cluster and the name of the topic joined with a comma (e.g. 
`id,name`)") + } + + return []*schema.ResourceData{d}, nil +} diff --git a/digitalocean/database/resource_database_kafka_topic_test.go b/digitalocean/database/resource_database_kafka_topic_test.go new file mode 100644 index 000000000..11d94ebd1 --- /dev/null +++ b/digitalocean/database/resource_database_kafka_topic_test.go @@ -0,0 +1,188 @@ +package database_test + +import ( + "context" + "fmt" + "testing" + + "github.com/digitalocean/terraform-provider-digitalocean/digitalocean/acceptance" + "github.com/digitalocean/terraform-provider-digitalocean/digitalocean/config" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccDigitalOceanDatabaseKafkaTopic(t *testing.T) { + name := acceptance.RandomTestName() + dbConfig := fmt.Sprintf(testAccCheckDigitalOceanDatabaseClusterKafka, name, "3.5") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.TestAccPreCheck(t) }, + ProviderFactories: acceptance.TestAccProviderFactories, + CheckDestroy: testAccCheckDigitalOceanDatabaseKafkaTopicDestroy, + Steps: []resource.TestStep{ + { + Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseKafkaTopicBasic, dbConfig, "topic-foobar"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "digitalocean_database_kafka_topic.foobar", "name", "topic-foobar"), + resource.TestCheckResourceAttr( + "digitalocean_database_kafka_topic.foobar", "state", "active"), + resource.TestCheckResourceAttr( + "digitalocean_database_kafka_topic.foobar", "replication_factor", "2"), + resource.TestCheckResourceAttr( + "digitalocean_database_kafka_topic.foobar", "partition_count", "3"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.cleanup_policy"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.compression_type"), + resource.TestCheckResourceAttrSet( + 
"digitalocean_database_kafka_topic.foobar", "config.0.delete_retention_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.file_delete_delay_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.flush_messages"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.flush_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.index_interval_bytes"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.max_compaction_lag_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.message_down_conversion_enable"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.message_format_version"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.message_timestamp_difference_max_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.message_timestamp_type"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.min_cleanable_dirty_ratio"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.min_compaction_lag_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.min_insync_replicas"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.retention_bytes"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.retention_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.segment_bytes"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.segment_index_bytes"), + resource.TestCheckResourceAttrSet( + 
"digitalocean_database_kafka_topic.foobar", "config.0.segment_jitter_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.segment_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.unclean_leader_election_enable"), + ), + }, + { + Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseKafkaTopicWithConfig, dbConfig, "topic-foobar", 5, 3, "compact", "snappy", 80000), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "digitalocean_database_kafka_topic.foobar", "name", "topic-foobar"), + resource.TestCheckResourceAttr( + "digitalocean_database_kafka_topic.foobar", "state", "active"), + resource.TestCheckResourceAttr( + "digitalocean_database_kafka_topic.foobar", "replication_factor", "3"), + resource.TestCheckResourceAttr( + "digitalocean_database_kafka_topic.foobar", "partition_count", "5"), + resource.TestCheckResourceAttr( + "digitalocean_database_kafka_topic.foobar", "config.0.cleanup_policy", "compact"), + resource.TestCheckResourceAttr( + "digitalocean_database_kafka_topic.foobar", "config.0.compression_type", "snappy"), + resource.TestCheckResourceAttr( + "digitalocean_database_kafka_topic.foobar", "config.0.delete_retention_ms", "80000"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.cleanup_policy"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.compression_type"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.delete_retention_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.file_delete_delay_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.flush_messages"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.flush_ms"), + resource.TestCheckResourceAttrSet( + 
"digitalocean_database_kafka_topic.foobar", "config.0.index_interval_bytes"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.max_compaction_lag_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.message_down_conversion_enable"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.message_format_version"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.message_timestamp_difference_max_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.message_timestamp_type"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.min_cleanable_dirty_ratio"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.min_compaction_lag_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.min_insync_replicas"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.retention_bytes"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.retention_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.segment_bytes"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.segment_index_bytes"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.segment_jitter_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.segment_ms"), + resource.TestCheckResourceAttrSet( + "digitalocean_database_kafka_topic.foobar", "config.0.unclean_leader_election_enable"), + ), + }, + }, + }) +} + +func testAccCheckDigitalOceanDatabaseKafkaTopicDestroy(s *terraform.State) error { + client := 
acceptance.TestAccProvider.Meta().(*config.CombinedConfig).GodoClient() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "digitalocean_database_kafka_topic" { + continue + } + clusterId := rs.Primary.Attributes["cluster_id"] + name := rs.Primary.Attributes["name"] + // Try to find the kafka topic + _, _, err := client.Databases.GetTopic(context.Background(), clusterId, name) + + if err == nil { + return fmt.Errorf("kafka topic still exists") + } + } + + return nil +} + +const testAccCheckDigitalOceanDatabaseKafkaTopicBasic = ` +%s + +resource "digitalocean_database_kafka_topic" "foobar" { + cluster_id = digitalocean_database_cluster.foobar.id + name = "%s" +}` + +const testAccCheckDigitalOceanDatabaseKafkaTopicWithConfig = ` +%s + +resource "digitalocean_database_kafka_topic" "foobar" { + cluster_id = digitalocean_database_cluster.foobar.id + name = "%s" + partition_count = %d + replication_factor = %d + config { + cleanup_policy = "%s" + compression_type = "%s" + delete_retention_ms = %d + } +}` diff --git a/digitalocean/provider.go b/digitalocean/provider.go index a7d9fe5fb..1b0fb73cf 100644 --- a/digitalocean/provider.go +++ b/digitalocean/provider.go @@ -148,6 +148,7 @@ func Provider() *schema.Provider { "digitalocean_database_user": database.ResourceDigitalOceanDatabaseUser(), "digitalocean_database_redis_config": database.ResourceDigitalOceanDatabaseRedisConfig(), "digitalocean_database_mysql_config": database.ResourceDigitalOceanDatabaseMySQLConfig(), + "digitalocean_database_kafka_topic": database.ResourceDigitalOceanDatabaseKafkaTopic(), "digitalocean_domain": domain.ResourceDigitalOceanDomain(), "digitalocean_droplet": droplet.ResourceDigitalOceanDroplet(), "digitalocean_droplet_snapshot": snapshot.ResourceDigitalOceanDropletSnapshot(), diff --git a/digitalocean/uptime/resource_uptime_alert.go b/digitalocean/uptime/resource_uptime_alert.go index 576ce03b2..bf0994011 100644 --- a/digitalocean/uptime/resource_uptime_alert.go +++ 
b/digitalocean/uptime/resource_uptime_alert.go @@ -129,7 +129,7 @@ func resourceDigitalOceanUptimeAlertCreate(ctx context.Context, d *schema.Resour Name: d.Get("name").(string), Type: d.Get("type").(string), Notifications: expandNotifications(d.Get("notifications").([]interface{})), - Comparison: d.Get("comparison").(godo.UptimeAlertComp), + Comparison: godo.UptimeAlertComp(d.Get("comparison").(string)), Threshold: d.Get("threshold").(int), Period: d.Get("period").(string), } @@ -167,7 +167,7 @@ func resourceDigitalOceanUptimeAlertUpdate(ctx context.Context, d *schema.Resour } if v, ok := d.GetOk("comparison"); ok { - opts.Comparison = v.(godo.UptimeAlertComp) + opts.Comparison = godo.UptimeAlertComp(v.(string)) } if v, ok := d.GetOk("threshold"); ok { opts.Threshold = v.(int) diff --git a/docs/resources/database_cluster.md b/docs/resources/database_cluster.md index ffab354d7..bb0202301 100644 --- a/docs/resources/database_cluster.md +++ b/docs/resources/database_cluster.md @@ -44,6 +44,18 @@ resource "digitalocean_database_cluster" "redis-example" { } ``` +### Create a new Kafka database cluster +```hcl +resource "digitalocean_database_cluster" "kafka-example" { + name = "example-kafka-cluster" + engine = "kafka" + version = "3.5" + size = "db-s-1vcpu-2gb" + region = "nyc1" + node_count = 3 +} +``` + ### Create a new MongoDB database cluster ```hcl resource "digitalocean_database_cluster" "mongodb-example" { @@ -92,10 +104,10 @@ resource "digitalocean_database_cluster" "doby_backup" { The following arguments are supported: * `name` - (Required) The name of the database cluster. -* `engine` - (Required) Database engine used by the cluster (ex. `pg` for PostreSQL, `mysql` for MySQL, `redis` for Redis, or `mongodb` for MongoDB). +* `engine` - (Required) Database engine used by the cluster (ex. `pg` for PostreSQL, `mysql` for MySQL, `redis` for Redis, `mongodb` for MongoDB, or `kafka` for Kafka). 
* `size` - (Required) Database Droplet size associated with the cluster (ex. `db-s-1vcpu-1gb`). See here for a [list of valid size slugs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Databases). * `region` - (Required) DigitalOcean region where the cluster will reside. -* `node_count` - (Required) Number of nodes that will be included in the cluster. +* `node_count` - (Required) Number of nodes that will be included in the cluster. For `kafka` clusters, this must be 3. * `version` - (Required) Engine version used by the cluster (ex. `14` for PostgreSQL 14). When this value is changed, a call to the [Upgrade major Version for a Database](https://docs.digitalocean.com/reference/api/api-reference/#operation/databases_update_major_version) API operation is made with the new version. * `tags` - (Optional) A list of tag names to be applied to the database cluster. diff --git a/docs/resources/database_kafka_topic.md b/docs/resources/database_kafka_topic.md new file mode 100644 index 000000000..f24f1217d --- /dev/null +++ b/docs/resources/database_kafka_topic.md @@ -0,0 +1,110 @@ +--- +page_title: "DigitalOcean: digitalocean_database_kafka_topic" +--- + +# digitalocean\_database\_kafka\_topic + +Provides a DigitalOcean Kafka topic for Kafka clusters. 
+ +## Example Usage + +### Create a new Kafka topic +```hcl +resource "digitalocean_database_kafka_topic" "topic-01" { + cluster_id = digitalocean_database_cluster.kafka-example.id + name = "topic-01" + partition_count = 3 + replication_factor = 2 + config { + cleanup_policy = "compact" + compression_type = "uncompressed" + delete_retention_ms = 14000 + file_delete_delay_ms = 170000 + flush_messages = 92233 + flush_ms = 92233720368 + index_interval_bytes = 40962 + max_compaction_lag_ms = 9223372036854775807 + max_message_bytes = 1048588 + message_down_conversion_enable = true + message_format_version = "3.0-IV1" + message_timestamp_difference_max_ms = 9223372036854775807 + message_timestamp_type = "log_append_time" + min_cleanable_dirty_ratio = 0.5 + min_compaction_lag_ms = 20000 + min_insync_replicas = 2 + preallocate = false + retention_bytes = -1 + retention_ms = -1 + segment_bytes = 209715200 + segment_index_bytes = 10485760 + segment_jitter_ms = 0 + segment_ms = 604800000 + unclean_leader_election_enable = true + } +} + +resource "digitalocean_database_cluster" "kafka-example" { + name = "example-kafka-cluster" + engine = "kafka" + version = "3.5" + size = "db-s-1vcpu-2gb" + region = "nyc1" + node_count = 3 + tags = ["production"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `cluster_id` - (Required) The ID of the source database cluster. Note: This must be a Kafka cluster. +* `name` - (Required) The name for the topic. +* `partition_count` - (Optional) The number of partitions for the topic. Default and minimum set at 3, maximum is 2048. +* `replication_factor` - (Optional) The number of nodes that topics are replicated across. Default and minimum set at 2, maximum is the number of nodes in the cluster. +* `config` - (Optional) A set of advanced configuration parameters. Defaults will be set for any of the parameters that are not included. + The `config` block is documented below. 
+
+`config` supports the following:
+
+* `cleanup_policy` - (Optional) The topic cleanup policy that describes whether messages should be deleted, compacted, or both when retention policies are violated.
+   This may be one of "delete", "compact", or "compact_delete".
+* `compression_type` - (Optional) The topic compression codecs used for a given topic.
+   This may be one of "uncompressed", "gzip", "snappy", "lz4", "producer", "zstd". "uncompressed" indicates that there is no compression and "producer" retains the original compression codec set by the producer.
+* `delete_retention_ms` - (Optional) The amount of time, in ms, that deleted records are retained.
+* `file_delete_delay_ms` - (Optional) The amount of time, in ms, to wait before deleting a topic log segment from the filesystem.
+* `flush_messages` - (Optional) The number of messages accumulated on a topic partition before they are flushed to disk.
+* `flush_ms` - (Optional) The maximum time, in ms, that a topic is kept in memory before being flushed to disk.
+* `index_interval_bytes` - (Optional) The interval, in bytes, in which entries are added to the offset index.
+* `max_compaction_lag_ms` - (Optional) The maximum time, in ms, that a particular message will remain uncompacted. This will not apply if the `compression_type` is set to "uncompressed" or it is set to `producer` and the producer is not using compression.
+* `max_message_bytes` - (Optional) The maximum size, in bytes, of a message.
+* `message_down_conversion_enable` - (Optional) Determines whether down-conversion of message formats for consumers is enabled.
+* `message_format_version` - (Optional) The version of the inter-broker protocol that will be used.
This may be one of "0.8.0", "0.8.1", "0.8.2", "0.9.0", "0.10.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", "0.10.2", "0.10.2-IV0", "0.11.0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0", "1.0-IV0", "1.1", "1.1-IV0", "2.0", "2.0-IV0", "2.0-IV1", "2.1", "2.1-IV0", "2.1-IV1", "2.1-IV2", "2.2", "2.2-IV0", "2.2-IV1", "2.3", "2.3-IV0", "2.3-IV1", "2.4", "2.4-IV0", "2.4-IV1", "2.5", "2.5-IV0", "2.6", "2.6-IV0", "2.7", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8", "2.8-IV0", "2.8-IV1", "3.0", "3.0-IV0", "3.0-IV1", "3.1", "3.1-IV0", "3.2", "3.2-IV0", "3.3", "3.3-IV0", "3.3-IV1", "3.3-IV2", "3.3-IV3", "3.4", "3.4-IV0", "3.5", "3.5-IV0", "3.5-IV1", "3.5-IV2", "3.6", "3.6-IV0", "3.6-IV1", "3.6-IV2".
+* `message_timestamp_difference_max_ms` - (Optional) The maximum difference, in ms, between the timestamp specified in a message and when the broker receives the message.
+* `message_timestamp_type` - (Optional) Specifies which timestamp to use for the message. This may be one of "create_time" or "log_append_time".
+* `min_cleanable_dirty_ratio` - (Optional) A scale between 0.0 and 1.0 which controls the frequency of the compactor. Larger values mean more frequent compactions. This is often paired with `max_compaction_lag_ms` to control the compactor frequency.
+* `min_insync_replicas` - (Optional) The number of replicas that must acknowledge a write before it is considered successful. -1 is a special setting to indicate that all nodes must ack a message before a write is considered successful.
+* `preallocate` - (Optional) Determines whether to preallocate a file on disk when creating a new log segment within a topic.
+* `retention_bytes` - (Optional) The maximum size, in bytes, of a topic before messages are deleted. -1 is a special setting indicating that this setting has no limit.
+* `retention_ms` - (Optional) The maximum time, in ms, that a topic log file is retained before deleting it.
-1 is a special setting indicating that this setting has no limit.
+* `segment_bytes` - (Optional) The maximum size, in bytes, of a single topic log file.
+* `segment_index_bytes` - (Optional) The maximum size, in bytes, of the offset index.
+* `segment_jitter_ms` - (Optional) The maximum time, in ms, subtracted from the scheduled segment disk flush time to avoid the thundering herd problem for segment flushing.
+* `segment_ms` - (Optional) The maximum time, in ms, before the topic log will flush to disk.
+* `unclean_leader_election_enable` - (Optional) Determines whether to allow nodes that are not part of the in-sync replica set (ISR) to be elected as leader. Note: setting this to "true" could result in data loss.
+
+
+
+## Attributes Reference
+
+In addition to the above arguments, the following attributes are exported:
+
+* `state` - The current status of the topic. Possible values are 'active', 'configuring', and 'deleting'.
+
+## Import
+
+Topics can be imported using the `id` of the source cluster and the `name` of the topic joined with a comma.
For example: + +``` +terraform import digitalocean_database_kafka_topic.topic-01 245bcfd0-7f31-4ce6-a2bc-475a116cca97,topic-01 +``` diff --git a/go.mod b/go.mod index ef41a901c..afa0cbc92 100644 --- a/go.mod +++ b/go.mod @@ -11,11 +11,11 @@ require ( github.com/mitchellh/hashstructure/v2 v2.0.1 github.com/stretchr/testify v1.8.4 golang.org/x/oauth2 v0.11.0 - gopkg.in/yaml.v2 v2.3.0 + gopkg.in/yaml.v2 v2.4.0 ) require ( - github.com/agext/levenshtein v1.2.2 // indirect + github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.13.0 // indirect @@ -41,6 +41,7 @@ require ( github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/kr/pretty v0.3.0 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect diff --git a/go.sum b/go.sum index 2551a56c5..b5373af72 100644 --- a/go.sum +++ b/go.sum @@ -9,8 +9,8 @@ github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C6 github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= -github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/anmitsu/go-shlex 
v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -129,8 +129,9 @@ github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgy github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -172,6 +173,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= 
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -299,13 +302,15 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml index a51a14466..68d38816f 100644 --- a/vendor/github.com/agext/levenshtein/.travis.yml +++ b/vendor/github.com/agext/levenshtein/.travis.yml @@ -3,8 +3,11 @@ sudo: false matrix: fast_finish: true include: - - go: 1.11.x + - go: 1.14.x env: TEST_METHOD=goveralls + - go: 1.13.x + - go: 1.12.x + - go: 1.11.x - go: 
1.10.x - go: tip - go: 1.9.x @@ -14,6 +17,8 @@ matrix: - go: 1.5.x allow_failures: - go: tip + - go: 1.11.x + - go: 1.10.x - go: 1.9.x - go: 1.8.x - go: 1.7.x diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md index 9e4255879..d9a8ce16d 100644 --- a/vendor/github.com/agext/levenshtein/README.md +++ b/vendor/github.com/agext/levenshtein/README.md @@ -11,7 +11,7 @@ This package implements distance and similarity metrics for strings, based on th ## Project Status -v1.2.2 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. +v1.2.3 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome. 
diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go index df69ce701..56d719b83 100644 --- a/vendor/github.com/agext/levenshtein/levenshtein.go +++ b/vendor/github.com/agext/levenshtein/levenshtein.go @@ -108,7 +108,7 @@ func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, for x := 0; x < l2; x++ { dy, d[doff] = d[doff], d[doff]+insCost - for d[doff] > maxCost && dlen > 0 { + for doff < l1 && d[doff] > maxCost && dlen > 0 { if str1[doff] != str2[x] { dy += subCost } diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml index 055480b9e..7348c50c0 100644 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -11,6 +11,7 @@ go: - "1.11.x" - "1.12.x" - "1.13.x" + - "1.14.x" - "tip" go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go index d2c2308f1..acf71402c 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -79,6 +79,8 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { parser.encoding = encoding } +var disableLineWrapping = false + // Create a new emitter object. 
func yaml_emitter_initialize(emitter *yaml_emitter_t) { *emitter = yaml_emitter_t{ @@ -86,7 +88,9 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), - best_width: -1, + } + if disableLineWrapping { + emitter.best_width = -1 } } diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go index 89650e293..30813884c 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -175,7 +175,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // Zero valued structs will be omitted if all their public // fields are zero, unless they implement an IsZero // method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. +// case the field will be excluded if IsZero returns true. // // flow Marshal using a flow style (useful for structs, // sequences and maps). @@ -464,3 +464,15 @@ func isZero(v reflect.Value) bool { } return false } + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to faciliate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration. 
+func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 53b8e8a64..8c410fc13 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# github.com/agext/levenshtein v1.2.2 +# github.com/agext/levenshtein v1.2.3 ## explicit github.com/agext/levenshtein # github.com/apparentlymart/go-textseg/v13 v13.0.0 @@ -215,6 +215,8 @@ github.com/hashicorp/yamux # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 github.com/jmespath/go-jmespath +# github.com/kr/pretty v0.3.0 +## explicit; go 1.12 # github.com/mattn/go-colorable v0.1.12 ## explicit; go 1.13 github.com/mattn/go-colorable @@ -430,8 +432,8 @@ google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/emptypb google.golang.org/protobuf/types/known/timestamppb -# gopkg.in/yaml.v2 v2.3.0 -## explicit +# gopkg.in/yaml.v2 v2.4.0 +## explicit; go 1.15 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit